From 57fb6aaca994ec4528ad078deeb249f8a22de80c Mon Sep 17 00:00:00 2001
From: Yuqi Wang
Date: Fri, 14 Jan 2022 20:05:30 +0800
Subject: [PATCH 1/7] Update deps and codegen

---
 README.md               |   2 +-
 go.mod                  |  33 +----
 go.sum                  | 313 ++++++++++++++++++++++++++++++++++++----
 hack/boilerplate.go.txt |  22 +++
 hack/tools.go           |  31 ++++
 hack/update-codegen.sh  |   5 +-
 hack/update-dep.sh      |   3 +
 7 files changed, 353 insertions(+), 56 deletions(-)
 create mode 100644 hack/boilerplate.go.txt
 create mode 100644 hack/tools.go

diff --git a/README.md b/README.md
index a0ce04e5..2a94a09f 100644
--- a/README.md
+++ b/README.md
@@ -75,7 +75,7 @@ A Framework represents an application with a set of Tasks:
 12. Aligned with Kubernetes [Controller Design Guidelines](https://github.com/kubernetes/community/blob/f0dd87ad477e1e91c53866902adf7832c32ce543/contributors/devel/sig-api-machinery/controllers.md) and [API Conventions](https://github.com/kubernetes/community/blob/a2cdce51a0bbbc214f0e8813e0a877176ad3b6c9/contributors/devel/sig-architecture/api-conventions.md)
 
 ## Prerequisite
-1. A Kubernetes cluster, v1.14.2 or above, on-cloud or on-premise.
+1. A Kubernetes cluster, v1.16.15 or above, on-cloud or on-premise.
 
 ## Quick Start
 1. [Run Controller](example/run)
diff --git a/go.mod b/go.mod
index 38cf2f74..58d276fd 100644
--- a/go.mod
+++ b/go.mod
@@ -3,35 +3,18 @@ module github.com/microsoft/frameworkcontroller
 go 1.12
 
 require (
-	github.com/evanphx/json-patch v4.2.0+incompatible // indirect
-	github.com/gogo/protobuf v1.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.3.1 // indirect
-	github.com/google/gofuzz v1.0.0 // indirect
 	github.com/google/uuid v1.1.2 // indirect
 	github.com/googleapis/gnostic v0.2.0 // indirect
-	github.com/hashicorp/golang-lru v0.5.1 // indirect
 	github.com/imdario/mergo v0.3.7 // indirect
-	github.com/json-iterator/go v1.1.6 // indirect
-	github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
-	github.com/pborman/uuid v1.2.1 // indirect
 	github.com/pkg/errors v0.8.1
-	github.com/spf13/pflag v1.0.3 // indirect
-	golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5 // indirect
-	golang.org/x/net v0.0.0-20190603091049-60506f45cf65 // indirect
-	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
-	golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed // indirect
-	golang.org/x/text v0.3.2 // indirect
-	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
 	google.golang.org/appengine v1.6.0 // indirect
-	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/yaml.v2 v2.2.2
-	k8s.io/api v0.0.0-20190516230258-a675ac48af67
-	k8s.io/apiextensions-apiserver v0.0.0-20190516231611-bf6753f2aa24
-	k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
-	k8s.io/client-go v0.0.0-20190516230509-ae8359b20417
-	k8s.io/klog v0.3.2
-	k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 // indirect
-	k8s.io/utils v0.0.0-20190529001817-6999998975a7 // indirect
-	sigs.k8s.io/yaml v1.1.0 // indirect
+	gopkg.in/yaml.v2 v2.2.8
+	k8s.io/api v0.16.15
+	k8s.io/apiextensions-apiserver v0.16.15
+	k8s.io/apimachinery v0.16.15
+	k8s.io/client-go v0.16.15
+	k8s.io/code-generator v0.16.15
+	k8s.io/klog v1.0.0
+	sigs.k8s.io/yaml v1.2.0 // indirect
 )
diff --git a/go.sum b/go.sum
index a3cd6d7e..a3069362 100644
--- a/go.sum
+++ b/go.sum
@@ -1,124 +1,379 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod 
h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= 
+github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c h1:Hww8mOyEKTeON4bZn7FrlLismspbPc1teNRUVH7wLQ8= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c h1:eSfnfIuwhxZyULg1NNuZycJcYkjYVGYe7FczwQReM6U= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= -github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5 h1:8dUaAV7K4uHsF56JQWkprecIQKdPHtR9jCHF5nB8uzc= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw= google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20190516230258-a675ac48af67 h1:BKg03K4me3EdM340RB08XB3MRlJ57KY+0f5PfI6wPZY= -k8s.io/api v0.0.0-20190516230258-a675ac48af67/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190516231611-bf6753f2aa24 h1:SwpU8lyfh7Orv1XmveHiN6AD2LxffLHLePQ2gcav4sk= -k8s.io/apiextensions-apiserver v0.0.0-20190516231611-bf6753f2aa24/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v0.0.0-20190516230509-ae8359b20417 h1:8k7W86o/YHr0Yw+7EX1DX/ZCmPZlfIoCSqZJlhcbSnE= -k8s.io/client-go v0.0.0-20190516230509-ae8359b20417/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.16.15 h1:6yvV9YNGwnebDAsA4Sfj+1b1S9j5OYfmckjTdc9b1bI= +k8s.io/api v0.16.15/go.mod h1:8z880CLtpCJqHWe9vmBkZMQeMKHNvdTQuqLW2QUefUA= +k8s.io/apiextensions-apiserver v0.16.15 h1:+tx3F9Yxrmt5iMZsoFOjICv4FF3rHCGkNbxC1nwfTko= +k8s.io/apiextensions-apiserver v0.16.15/go.mod h1:8L35Pijr64c590A9rmne7KCktA+DjovkbnuWcz9WDjA= +k8s.io/apimachinery v0.16.15 h1:4cmEfuRsKuV8pMpaQ6z0AKEUXZ3r+u/NKaz5dvIjySk= +k8s.io/apimachinery v0.16.15/go.mod h1:xAtIC8Gj83Pn2OCs2g57wZpZembRhJhiXIlQIqanwas= +k8s.io/apiserver v0.16.15/go.mod h1:0tAQf1+cDOwK9m9it6jo1HydLjZJp74e8U4SI1sTSME= +k8s.io/client-go v0.16.15 h1:cuSmM5begnN77V0beNgmhQ9yob6TFUnN+YaqAfRBD40= +k8s.io/client-go v0.16.15/go.mod h1:onpbkg9XeonG579HOlK9RS56ixfOJdbBM5dKluyFM8c= +k8s.io/code-generator v0.16.15 h1:rDlWqu8cegrg+vVDfgeLOa6K7xHsrYzXB1vhY3AMLuo= +k8s.io/code-generator v0.16.15/go.mod h1:J2H4yLa80/c3JcoMtjfRQ/cqZNWeJnNhrCtedYrPJ+k= +k8s.io/component-base v0.16.15/go.mod h1:KV1nlaKpvS7LmzugCfL8ss3gZV0glkODyJdYNJNB5CM= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.2 h1:qvP/U6CcZ6qyi/qSHlJKdlAboCzo3mT0DAm0XAarpz4= -k8s.io/klog v0.3.2/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 h1:5sW+fEHvlJI3Ngolx30CmubFulwH28DhKjGf70Xmtco= -k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/utils 
v0.0.0-20190529001817-6999998975a7 h1:5UOdmwfY+7XsXvo26XeCDu9GhHJPkO1z8Mcz5AHMnOE= -k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31 h1:PsbYeEz2x7ll6JYUzBEG+DT78910DDTlvn5Ma10F5/E= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v1.0.2/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 00000000..b10ba1e1 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,22 @@ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE + diff --git a/hack/tools.go b/hack/tools.go new file mode 100644 index 00000000..3c0c8e35 --- /dev/null +++ b/hack/tools.go @@ -0,0 +1,31 @@ +//go:build tools +// +build tools + +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE + +// This package is used to "go mod vendor" the below imported dependencies, but +// it is excluded during the normal build. + +package tools + +import _ "k8s.io/code-generator" \ No newline at end of file diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index afe1140d..7f2ca8ee 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -31,10 +31,13 @@ PROJECT_DIR=${BASH_DIR}/.. cd ${PROJECT_DIR} +chmod a+x ./vendor/k8s.io/code-generator/generate-groups.sh ./vendor/k8s.io/code-generator/generate-groups.sh \ "all" \ github.com/microsoft/frameworkcontroller/pkg/client \ github.com/microsoft/frameworkcontroller/pkg/apis \ - frameworkcontroller:v1 + frameworkcontroller:v1 \ + --go-header-file "${PROJECT_DIR}"/hack/boilerplate.go.txt \ + --output-base "${PROJECT_DIR}"/../../.. echo Succeeded to update generated code diff --git a/hack/update-dep.sh b/hack/update-dep.sh index 2ae13abb..52916fca 100755 --- a/hack/update-dep.sh +++ b/hack/update-dep.sh @@ -26,11 +26,14 @@ set -o errexit set -o nounset set -o pipefail +export GO111MODULE=on + BASH_DIR=$(cd $(dirname ${BASH_SOURCE}) && pwd) PROJECT_DIR=${BASH_DIR}/.. 
cd ${PROJECT_DIR} +go mod tidy go mod vendor echo Succeeded to update dependent package From 15372730370ae1074411e9680d75d7b3bc312f26 Mon Sep 17 00:00:00 2001 From: Yuqi Wang Date: Fri, 14 Jan 2022 12:08:54 +0000 Subject: [PATCH 2/7] Update all --- .../v1/zz_generated.deepcopy.go | 38 +- pkg/client/clientset/versioned/clientset.go | 43 +- pkg/client/clientset/versioned/doc.go | 36 +- .../versioned/fake/clientset_generated.go | 43 +- pkg/client/clientset/versioned/fake/doc.go | 36 +- .../clientset/versioned/fake/register.go | 36 +- pkg/client/clientset/versioned/scheme/doc.go | 36 +- .../clientset/versioned/scheme/register.go | 36 +- .../typed/frameworkcontroller/v1/doc.go | 36 +- .../typed/frameworkcontroller/v1/fake/doc.go | 36 +- .../v1/fake/fake_framework.go | 36 +- .../fake/fake_frameworkcontroller_client.go | 36 +- .../typed/frameworkcontroller/v1/framework.go | 36 +- .../v1/frameworkcontroller_client.go | 39 +- .../v1/generated_expansion.go | 36 +- .../informers/externalversions/factory.go | 36 +- .../frameworkcontroller/interface.go | 36 +- .../frameworkcontroller/v1/framework.go | 36 +- .../frameworkcontroller/v1/interface.go | 36 +- .../informers/externalversions/generic.go | 36 +- .../internalinterfaces/factory_interfaces.go | 36 +- .../v1/expansion_generated.go | 36 +- .../frameworkcontroller/v1/framework.go | 36 +- .../github.com/PuerkitoBio/purell/.gitignore | 5 + .../github.com/PuerkitoBio/purell/.travis.yml | 12 + vendor/github.com/PuerkitoBio/purell/LICENSE | 12 + .../github.com/PuerkitoBio/purell/README.md | 188 + .../github.com/PuerkitoBio/purell/purell.go | 379 + .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 + .../uuid => PuerkitoBio/urlesc}/LICENSE | 2 +- .../github.com/PuerkitoBio/urlesc/README.md | 16 + .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 + .../github.com/emicklei/go-restful/.gitignore | 70 + .../emicklei/go-restful/.travis.yml | 6 + .../github.com/emicklei/go-restful/CHANGES.md | 273 + vendor/github.com/emicklei/go-restful/LICENSE | 22 + .../github.com/emicklei/go-restful/Makefile | 7 + .../github.com/emicklei/go-restful/README.md | 88 + vendor/github.com/emicklei/go-restful/Srcfile | 1 + .../emicklei/go-restful/bench_test.sh | 10 + .../emicklei/go-restful/compress.go | 123 + .../emicklei/go-restful/compressor_cache.go | 103 + .../emicklei/go-restful/compressor_pools.go | 91 + .../emicklei/go-restful/compressors.go | 54 + .../emicklei/go-restful/constants.go | 30 + .../emicklei/go-restful/container.go | 377 + .../emicklei/go-restful/cors_filter.go | 202 + .../emicklei/go-restful/coverage.sh | 2 + .../github.com/emicklei/go-restful/curly.go | 164 + .../emicklei/go-restful/curly_route.go | 54 + vendor/github.com/emicklei/go-restful/doc.go | 185 + .../emicklei/go-restful/entity_accessors.go | 162 + .../github.com/emicklei/go-restful/filter.go | 35 + vendor/github.com/emicklei/go-restful/json.go | 11 + .../emicklei/go-restful/jsoniter.go | 12 + .../github.com/emicklei/go-restful/jsr311.go | 297 + .../github.com/emicklei/go-restful/log/log.go | 34 + .../github.com/emicklei/go-restful/logger.go | 32 + vendor/github.com/emicklei/go-restful/mime.go | 50 + .../emicklei/go-restful/options_filter.go | 34 + .../emicklei/go-restful/parameter.go | 143 + .../emicklei/go-restful/path_expression.go | 74 + .../emicklei/go-restful/path_processor.go | 63 + .../github.com/emicklei/go-restful/request.go | 118 + .../emicklei/go-restful/response.go | 255 + .../github.com/emicklei/go-restful/route.go | 170 + .../emicklei/go-restful/route_builder.go | 326 + 
.../github.com/emicklei/go-restful/router.go | 20 + .../emicklei/go-restful/service_error.go | 23 + .../emicklei/go-restful/web_service.go | 290 + .../go-restful/web_service_container.go | 39 + .../github.com/evanphx/json-patch/.travis.yml | 7 +- vendor/github.com/evanphx/json-patch/LICENSE | 2 +- .../github.com/evanphx/json-patch/README.md | 11 +- vendor/github.com/evanphx/json-patch/merge.go | 13 +- vendor/github.com/evanphx/json-patch/patch.go | 232 +- .../go-openapi/jsonpointer/.editorconfig | 26 + .../go-openapi/jsonpointer/.gitignore | 1 + .../go-openapi/jsonpointer/.travis.yml | 15 + .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 + .../github.com/go-openapi/jsonpointer/LICENSE | 202 + .../go-openapi/jsonpointer/README.md | 15 + .../github.com/go-openapi/jsonpointer/go.mod | 9 + .../github.com/go-openapi/jsonpointer/go.sum | 24 + .../go-openapi/jsonpointer/pointer.go | 390 + .../go-openapi/jsonreference/.gitignore | 1 + .../go-openapi/jsonreference/.travis.yml | 15 + .../jsonreference/CODE_OF_CONDUCT.md | 74 + .../go-openapi/jsonreference/LICENSE | 202 + .../go-openapi/jsonreference/README.md | 15 + .../go-openapi/jsonreference/go.mod | 10 + .../go-openapi/jsonreference/go.sum | 36 + .../go-openapi/jsonreference/reference.go | 156 + .../github.com/go-openapi/spec/.editorconfig | 26 + vendor/github.com/go-openapi/spec/.gitignore | 2 + .../github.com/go-openapi/spec/.golangci.yml | 23 + vendor/github.com/go-openapi/spec/.travis.yml | 15 + .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/spec/LICENSE | 202 + vendor/github.com/go-openapi/spec/README.md | 10 + vendor/github.com/go-openapi/spec/bindata.go | 297 + vendor/github.com/go-openapi/spec/cache.go | 60 + .../go-openapi/spec/contact_info.go | 24 + vendor/github.com/go-openapi/spec/debug.go | 47 + vendor/github.com/go-openapi/spec/expander.go | 650 + .../go-openapi/spec/external_docs.go | 24 + vendor/github.com/go-openapi/spec/go.mod | 14 + vendor/github.com/go-openapi/spec/go.sum | 66 + vendor/github.com/go-openapi/spec/header.go | 197 + vendor/github.com/go-openapi/spec/info.go | 165 + vendor/github.com/go-openapi/spec/items.go | 244 + vendor/github.com/go-openapi/spec/license.go | 23 + .../github.com/go-openapi/spec/normalizer.go | 152 + .../github.com/go-openapi/spec/operation.go | 398 + .../github.com/go-openapi/spec/parameter.go | 321 + .../github.com/go-openapi/spec/path_item.go | 87 + vendor/github.com/go-openapi/spec/paths.go | 97 + vendor/github.com/go-openapi/spec/ref.go | 191 + vendor/github.com/go-openapi/spec/response.go | 131 + .../github.com/go-openapi/spec/responses.go | 127 + vendor/github.com/go-openapi/spec/schema.go | 596 + .../go-openapi/spec/schema_loader.go | 275 + .../go-openapi/spec/security_scheme.go | 140 + vendor/github.com/go-openapi/spec/spec.go | 86 + vendor/github.com/go-openapi/spec/swagger.go | 448 + vendor/github.com/go-openapi/spec/tag.go | 75 + vendor/github.com/go-openapi/spec/unused.go | 174 + .../github.com/go-openapi/spec/xml_object.go | 68 + .../github.com/go-openapi/swag/.editorconfig | 26 + vendor/github.com/go-openapi/swag/.gitignore | 4 + .../github.com/go-openapi/swag/.golangci.yml | 22 + vendor/github.com/go-openapi/swag/.travis.yml | 15 + .../go-openapi/swag/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/swag/LICENSE | 202 + vendor/github.com/go-openapi/swag/README.md | 22 + vendor/github.com/go-openapi/swag/convert.go | 208 + .../go-openapi/swag/convert_types.go | 595 + vendor/github.com/go-openapi/swag/doc.go | 32 + 
vendor/github.com/go-openapi/swag/go.mod | 14 + vendor/github.com/go-openapi/swag/go.sum | 20 + vendor/github.com/go-openapi/swag/json.go | 312 + vendor/github.com/go-openapi/swag/loading.go | 80 + .../github.com/go-openapi/swag/name_lexem.go | 87 + vendor/github.com/go-openapi/swag/net.go | 38 + vendor/github.com/go-openapi/swag/path.go | 59 + .../github.com/go-openapi/swag/post_go18.go | 23 + .../github.com/go-openapi/swag/post_go19.go | 67 + vendor/github.com/go-openapi/swag/pre_go18.go | 23 + vendor/github.com/go-openapi/swag/pre_go19.go | 69 + vendor/github.com/go-openapi/swag/split.go | 262 + vendor/github.com/go-openapi/swag/util.go | 385 + vendor/github.com/go-openapi/swag/yaml.go | 246 + .../gogo/protobuf/proto/extensions.go | 1 + .../gogo/protobuf/proto/extensions_gogo.go | 21 + vendor/github.com/google/go-cmp/LICENSE | 27 + .../github.com/google/go-cmp/cmp/compare.go | 616 + .../google/go-cmp/cmp/export_panic.go | 15 + .../google/go-cmp/cmp/export_unsafe.go | 23 + .../go-cmp/cmp/internal/diff/debug_disable.go | 17 + .../go-cmp/cmp/internal/diff/debug_enable.go | 122 + .../google/go-cmp/cmp/internal/diff/diff.go | 372 + .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../cmp/internal/flags/toolchain_legacy.go | 10 + .../cmp/internal/flags/toolchain_recent.go | 10 + .../go-cmp/cmp/internal/function/func.go | 99 + .../cmp/internal/value/pointer_purego.go | 23 + .../cmp/internal/value/pointer_unsafe.go | 26 + .../google/go-cmp/cmp/internal/value/sort.go | 104 + .../google/go-cmp/cmp/internal/value/zero.go | 45 + .../github.com/google/go-cmp/cmp/options.go | 524 + vendor/github.com/google/go-cmp/cmp/path.go | 308 + vendor/github.com/google/go-cmp/cmp/report.go | 51 + .../google/go-cmp/cmp/report_compare.go | 296 + .../google/go-cmp/cmp/report_reflect.go | 279 + .../google/go-cmp/cmp/report_slices.go | 333 + .../google/go-cmp/cmp/report_text.go | 382 + .../google/go-cmp/cmp/report_value.go | 121 + vendor/github.com/json-iterator/go/adapter.go | 2 +- vendor/github.com/json-iterator/go/go.mod | 11 + vendor/github.com/json-iterator/go/go.sum | 14 + .../github.com/json-iterator/go/iter_skip.go | 25 +- .../json-iterator/go/reflect_native.go | 14 +- .../go/reflect_struct_decoder.go | 2 +- .../json-iterator/go/stream_float.go | 17 + vendor/github.com/mailru/easyjson/LICENSE | 7 + .../github.com/mailru/easyjson/buffer/pool.go | 270 + .../mailru/easyjson/jlexer/bytestostr.go | 24 + .../easyjson/jlexer/bytestostr_nounsafe.go | 13 + .../mailru/easyjson/jlexer/error.go | 15 + .../mailru/easyjson/jlexer/lexer.go | 1182 + .../mailru/easyjson/jwriter/writer.go | 390 + .../github.com/modern-go/reflect2/type_map.go | 16 +- vendor/github.com/pborman/uuid/.travis.yml | 10 - .../github.com/pborman/uuid/CONTRIBUTING.md | 10 - vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 - vendor/github.com/pborman/uuid/README.md | 15 - vendor/github.com/pborman/uuid/dce.go | 84 - vendor/github.com/pborman/uuid/doc.go | 13 - vendor/github.com/pborman/uuid/go.mod | 3 - vendor/github.com/pborman/uuid/go.sum | 2 - vendor/github.com/pborman/uuid/hash.go | 53 - vendor/github.com/pborman/uuid/marshal.go | 85 - vendor/github.com/pborman/uuid/node.go | 50 - vendor/github.com/pborman/uuid/sql.go | 68 - vendor/github.com/pborman/uuid/time.go | 57 - vendor/github.com/pborman/uuid/util.go | 32 - vendor/github.com/pborman/uuid/uuid.go | 162 - vendor/github.com/pborman/uuid/version1.go | 23 - vendor/github.com/pborman/uuid/version4.go | 26 - vendor/github.com/spf13/pflag/.travis.yml | 7 +- vendor/github.com/spf13/pflag/README.md 
| 4 +- vendor/github.com/spf13/pflag/bool_slice.go | 38 + vendor/github.com/spf13/pflag/count.go | 4 +- .../github.com/spf13/pflag/duration_slice.go | 38 + vendor/github.com/spf13/pflag/flag.go | 16 +- .../github.com/spf13/pflag/float32_slice.go | 174 + .../github.com/spf13/pflag/float64_slice.go | 166 + vendor/github.com/spf13/pflag/go.mod | 3 + vendor/github.com/spf13/pflag/go.sum | 0 vendor/github.com/spf13/pflag/int32_slice.go | 174 + vendor/github.com/spf13/pflag/int64_slice.go | 166 + vendor/github.com/spf13/pflag/int_slice.go | 30 + vendor/github.com/spf13/pflag/ip_slice.go | 40 +- vendor/github.com/spf13/pflag/string_array.go | 26 + vendor/github.com/spf13/pflag/string_slice.go | 22 +- .../github.com/spf13/pflag/string_to_int64.go | 149 + vendor/github.com/spf13/pflag/uint_slice.go | 42 + .../x/crypto/ssh/terminal/terminal.go | 17 +- .../x/crypto/ssh/terminal/util_windows.go | 4 +- vendor/golang.org/x/net/http2/server.go | 72 +- vendor/golang.org/x/net/http2/transport.go | 2 +- vendor/golang.org/x/net/http2/writesched.go | 8 +- .../x/net/http2/writesched_random.go | 9 +- .../golang.org/x/sys/unix/affinity_linux.go | 8 +- vendor/golang.org/x/sys/unix/dirent.go | 91 +- vendor/golang.org/x/sys/unix/endian_little.go | 2 +- vendor/golang.org/x/sys/unix/mkall.sh | 8 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 4 + vendor/golang.org/x/sys/unix/mkpost.go | 4 +- vendor/golang.org/x/sys/unix/mksysnum.go | 2 +- .../x/sys/unix/readdirent_getdents.go | 12 + .../x/sys/unix/readdirent_getdirentries.go | 19 + vendor/golang.org/x/sys/unix/syscall_aix.go | 18 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 12 +- .../golang.org/x/sys/unix/syscall_darwin.go | 13 +- .../x/sys/unix/syscall_darwin_386.go | 2 + .../x/sys/unix/syscall_darwin_amd64.go | 2 + .../x/sys/unix/syscall_darwin_arm.go | 4 + .../x/sys/unix/syscall_darwin_arm64.go | 4 + .../x/sys/unix/syscall_dragonfly.go | 17 + .../golang.org/x/sys/unix/syscall_freebsd.go | 94 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 25 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 37 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 37 +- .../golang.org/x/sys/unix/syscall_solaris.go | 17 + vendor/golang.org/x/sys/unix/types_freebsd.go | 50 +- vendor/golang.org/x/sys/unix/types_netbsd.go | 1 + vendor/golang.org/x/sys/unix/types_openbsd.go | 1 + .../x/sys/unix/zerrors_linux_386.go | 75 + .../x/sys/unix/zerrors_linux_amd64.go | 75 + .../x/sys/unix/zerrors_linux_arm.go | 75 + .../x/sys/unix/zerrors_linux_arm64.go | 75 + .../x/sys/unix/zerrors_linux_mips.go | 75 + .../x/sys/unix/zerrors_linux_mips64.go | 75 + .../x/sys/unix/zerrors_linux_mips64le.go | 75 + .../x/sys/unix/zerrors_linux_mipsle.go | 75 + .../x/sys/unix/zerrors_linux_ppc64.go | 75 + .../x/sys/unix/zerrors_linux_ppc64le.go | 75 + .../x/sys/unix/zerrors_linux_riscv64.go | 75 + .../x/sys/unix/zerrors_linux_s390x.go | 75 + .../x/sys/unix/zerrors_linux_sparc64.go | 75 + .../x/sys/unix/zsyscall_darwin_386.1_11.go | 20 +- .../x/sys/unix/zsyscall_darwin_386.go | 30 +- .../x/sys/unix/zsyscall_darwin_386.s | 4 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 30 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 4 +- .../x/sys/unix/zsyscall_darwin_arm.go | 15 - .../x/sys/unix/zsyscall_darwin_arm.s | 2 - .../x/sys/unix/zsyscall_darwin_arm64.go | 15 - .../x/sys/unix/zsyscall_darwin_arm64.s | 2 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 17 + .../x/sys/unix/zsyscall_freebsd_386.go | 12 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 12 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 12 +- 
.../x/sys/unix/zsyscall_freebsd_arm64.go | 12 +- .../x/sys/unix/zsyscall_linux_386.go | 20 + .../x/sys/unix/zsyscall_linux_amd64.go | 20 + .../x/sys/unix/zsyscall_linux_arm.go | 20 + .../x/sys/unix/zsyscall_linux_arm64.go | 20 + .../x/sys/unix/zsyscall_linux_mips.go | 20 + .../x/sys/unix/zsyscall_linux_mips64.go | 20 + .../x/sys/unix/zsyscall_linux_mips64le.go | 20 + .../x/sys/unix/zsyscall_linux_mipsle.go | 20 + .../x/sys/unix/zsyscall_linux_ppc64.go | 20 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 20 + .../x/sys/unix/zsyscall_linux_riscv64.go | 20 + .../x/sys/unix/zsyscall_linux_s390x.go | 20 + .../x/sys/unix/zsyscall_linux_sparc64.go | 20 + .../x/sys/unix/zsyscall_netbsd_386.go | 2 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 2 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 2 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 2 +- .../x/sys/unix/zsyscall_openbsd_386.go | 2 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 2 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 2 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 2 +- .../x/sys/unix/zsysnum_freebsd_386.go | 23 +- .../x/sys/unix/zsysnum_freebsd_amd64.go | 23 +- .../x/sys/unix/zsysnum_freebsd_arm.go | 23 +- .../x/sys/unix/zsysnum_freebsd_arm64.go | 445 +- .../x/sys/unix/zsysnum_linux_386.go | 6 + .../x/sys/unix/zsysnum_linux_amd64.go | 6 + .../x/sys/unix/zsysnum_linux_arm.go | 6 + .../x/sys/unix/zsysnum_linux_arm64.go | 6 + .../x/sys/unix/zsysnum_linux_mips.go | 6 + .../x/sys/unix/zsysnum_linux_mips64.go | 6 + .../x/sys/unix/zsysnum_linux_mips64le.go | 6 + .../x/sys/unix/zsysnum_linux_mipsle.go | 6 + .../x/sys/unix/zsysnum_linux_ppc64.go | 6 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 6 + .../x/sys/unix/zsysnum_linux_riscv64.go | 6 + .../x/sys/unix/zsysnum_linux_s390x.go | 6 + .../x/sys/unix/zsysnum_linux_sparc64.go | 6 + .../x/sys/unix/ztypes_freebsd_386.go | 103 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 110 +- .../x/sys/unix/ztypes_freebsd_arm.go | 87 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 88 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 54 + .../x/sys/unix/ztypes_linux_amd64.go | 55 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 54 + .../x/sys/unix/ztypes_linux_arm64.go | 55 + .../x/sys/unix/ztypes_linux_mips.go | 54 + .../x/sys/unix/ztypes_linux_mips64.go | 55 + .../x/sys/unix/ztypes_linux_mips64le.go | 55 + .../x/sys/unix/ztypes_linux_mipsle.go | 54 + .../x/sys/unix/ztypes_linux_ppc64.go | 55 + .../x/sys/unix/ztypes_linux_ppc64le.go | 55 + .../x/sys/unix/ztypes_linux_riscv64.go | 56 + .../x/sys/unix/ztypes_linux_s390x.go | 55 + .../x/sys/unix/ztypes_linux_sparc64.go | 55 + .../x/sys/unix/ztypes_netbsd_386.go | 1 + .../x/sys/unix/ztypes_netbsd_amd64.go | 1 + .../x/sys/unix/ztypes_netbsd_arm.go | 1 + .../x/sys/unix/ztypes_netbsd_arm64.go | 1 + .../x/sys/unix/ztypes_openbsd_386.go | 1 + .../x/sys/unix/ztypes_openbsd_amd64.go | 1 + .../x/sys/unix/ztypes_openbsd_arm.go | 1 + .../x/sys/unix/ztypes_openbsd_arm64.go | 1 + vendor/golang.org/x/sys/windows/mkerrors.bash | 2 +- .../x/sys/windows/mkknownfolderids.bash | 27 + vendor/golang.org/x/sys/windows/service.go | 11 + .../x/sys/windows/syscall_windows.go | 89 +- .../golang.org/x/sys/windows/types_windows.go | 86 +- .../x/sys/windows/zerrors_windows.go | 2 +- .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 137 +- vendor/golang.org/x/text/width/gen.go | 115 + vendor/golang.org/x/text/width/gen_common.go | 96 + vendor/golang.org/x/text/width/gen_trieval.go | 34 + vendor/golang.org/x/text/width/kind_string.go | 28 + 
.../golang.org/x/text/width/tables10.0.0.go | 1318 + .../golang.org/x/text/width/tables11.0.0.go | 1330 + vendor/golang.org/x/text/width/tables9.0.0.go | 1286 + vendor/golang.org/x/text/width/transform.go | 239 + vendor/golang.org/x/text/width/trieval.go | 30 + vendor/golang.org/x/text/width/width.go | 206 + vendor/golang.org/x/tools/AUTHORS | 3 + vendor/golang.org/x/tools/CONTRIBUTORS | 3 + vendor/golang.org/x/tools/LICENSE | 27 + vendor/golang.org/x/tools/PATENTS | 22 + .../x/tools/go/ast/astutil/enclosing.go | 627 + .../x/tools/go/ast/astutil/imports.go | 481 + .../x/tools/go/ast/astutil/rewrite.go | 477 + .../golang.org/x/tools/go/ast/astutil/util.go | 14 + .../x/tools/go/gcexportdata/gcexportdata.go | 109 + .../x/tools/go/gcexportdata/importer.go | 73 + .../x/tools/go/gcexportdata/main.go | 99 + .../x/tools/go/internal/gcimporter/bexport.go | 852 + .../x/tools/go/internal/gcimporter/bimport.go | 1037 + .../go/internal/gcimporter/exportdata.go | 93 + .../go/internal/gcimporter/gcimporter.go | 1078 + .../x/tools/go/internal/gcimporter/iexport.go | 723 + .../x/tools/go/internal/gcimporter/iimport.go | 606 + .../go/internal/gcimporter/newInterface10.go | 21 + .../go/internal/gcimporter/newInterface11.go | 13 + .../tools/go/internal/packagesdriver/sizes.go | 173 + vendor/golang.org/x/tools/go/packages/doc.go | 222 + .../x/tools/go/packages/external.go | 94 + .../golang.org/x/tools/go/packages/golist.go | 1021 + .../x/tools/go/packages/golist_overlay.go | 310 + .../x/tools/go/packages/packages.go | 1113 + .../golang.org/x/tools/go/packages/visit.go | 55 + vendor/golang.org/x/tools/imports/forward.go | 62 + .../x/tools/internal/fastwalk/fastwalk.go | 196 + .../fastwalk/fastwalk_dirent_fileno.go | 13 + .../internal/fastwalk/fastwalk_dirent_ino.go | 14 + .../fastwalk/fastwalk_dirent_namlen_bsd.go | 13 + .../fastwalk/fastwalk_dirent_namlen_linux.go | 29 + .../internal/fastwalk/fastwalk_portable.go | 37 + .../tools/internal/fastwalk/fastwalk_unix.go | 127 + .../x/tools/internal/gopathwalk/walk.go | 268 + .../x/tools/internal/imports/fix.go | 1385 + .../x/tools/internal/imports/imports.go | 388 + .../x/tools/internal/imports/mkindex.go | 173 + .../x/tools/internal/imports/mkstdlib.go | 128 + .../x/tools/internal/imports/mod.go | 500 + .../x/tools/internal/imports/mod_cache.go | 121 + .../x/tools/internal/imports/sortimports.go | 280 + .../x/tools/internal/imports/zstdlib.go | 10377 ++++ .../x/tools/internal/module/module.go | 540 + .../x/tools/internal/semver/semver.go | 388 + vendor/gonum.org/v1/gonum/AUTHORS | 89 + vendor/gonum.org/v1/gonum/CONTRIBUTORS | 91 + vendor/gonum.org/v1/gonum/LICENSE | 23 + vendor/gonum.org/v1/gonum/blas/README.md | 47 + vendor/gonum.org/v1/gonum/blas/blas.go | 283 + .../gonum.org/v1/gonum/blas/blas64/blas64.go | 469 + vendor/gonum.org/v1/gonum/blas/blas64/conv.go | 277 + .../v1/gonum/blas/blas64/conv_symmetric.go | 153 + vendor/gonum.org/v1/gonum/blas/blas64/doc.go | 6 + .../v1/gonum/blas/cblas128/cblas128.go | 508 + .../gonum.org/v1/gonum/blas/cblas128/conv.go | 279 + .../v1/gonum/blas/cblas128/conv_hermitian.go | 155 + .../v1/gonum/blas/cblas128/conv_symmetric.go | 155 + .../gonum.org/v1/gonum/blas/cblas128/doc.go | 6 + .../gonum.org/v1/gonum/blas/conversions.bash | 159 + vendor/gonum.org/v1/gonum/blas/doc.go | 108 + vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go | 314 + vendor/gonum.org/v1/gonum/blas/gonum/doc.go | 88 + .../gonum.org/v1/gonum/blas/gonum/errors.go | 35 + vendor/gonum.org/v1/gonum/blas/gonum/gemv.go | 190 + 
vendor/gonum.org/v1/gonum/blas/gonum/gonum.go | 58 + .../v1/gonum/blas/gonum/level1cmplx128.go | 445 + .../v1/gonum/blas/gonum/level1cmplx64.go | 467 + .../v1/gonum/blas/gonum/level1float32.go | 644 + .../gonum/blas/gonum/level1float32_dsdot.go | 53 + .../v1/gonum/blas/gonum/level1float32_sdot.go | 53 + .../gonum/blas/gonum/level1float32_sdsdot.go | 53 + .../v1/gonum/blas/gonum/level1float64.go | 620 + .../v1/gonum/blas/gonum/level1float64_ddot.go | 49 + .../v1/gonum/blas/gonum/level2cmplx128.go | 2906 + .../v1/gonum/blas/gonum/level2cmplx64.go | 2942 + .../v1/gonum/blas/gonum/level2float32.go | 2296 + .../v1/gonum/blas/gonum/level2float64.go | 2264 + .../v1/gonum/blas/gonum/level3cmplx128.go | 1715 + .../v1/gonum/blas/gonum/level3cmplx64.go | 1735 + .../v1/gonum/blas/gonum/level3float32.go | 876 + .../v1/gonum/blas/gonum/level3float64.go | 864 + vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go | 318 + .../v1/gonum/blas/gonum/single_precision.bash | 218 + vendor/gonum.org/v1/gonum/floats/README.md | 4 + vendor/gonum.org/v1/gonum/floats/doc.go | 11 + vendor/gonum.org/v1/gonum/floats/floats.go | 933 + vendor/gonum.org/v1/gonum/graph/.gitignore | 1 + vendor/gonum.org/v1/gonum/graph/README.md | 3 + vendor/gonum.org/v1/gonum/graph/doc.go | 9 + vendor/gonum.org/v1/gonum/graph/graph.go | 282 + .../v1/gonum/graph/internal/linear/doc.go | 6 + .../v1/gonum/graph/internal/linear/linear.go | 73 + .../v1/gonum/graph/internal/ordered/doc.go | 6 + .../v1/gonum/graph/internal/ordered/sort.go | 93 + .../v1/gonum/graph/internal/set/doc.go | 6 + .../v1/gonum/graph/internal/set/same.go | 36 + .../graph/internal/set/same_appengine.go | 36 + .../v1/gonum/graph/internal/set/set.go | 228 + .../v1/gonum/graph/internal/uid/uid.go | 54 + .../gonum.org/v1/gonum/graph/iterator/doc.go | 9 + .../v1/gonum/graph/iterator/edges.go | 131 + .../v1/gonum/graph/iterator/lines.go | 131 + .../v1/gonum/graph/iterator/nodes.go | 125 + vendor/gonum.org/v1/gonum/graph/multigraph.go | 198 + .../gonum.org/v1/gonum/graph/nodes_edges.go | 300 + .../graph/simple/dense_directed_matrix.go | 301 + .../graph/simple/dense_undirected_matrix.go | 268 + .../v1/gonum/graph/simple/directed.go | 235 + vendor/gonum.org/v1/gonum/graph/simple/doc.go | 9 + .../gonum.org/v1/gonum/graph/simple/simple.go | 72 + .../v1/gonum/graph/simple/undirected.go | 216 + .../gonum/graph/simple/weighted_directed.go | 279 + .../gonum/graph/simple/weighted_undirected.go | 273 + .../v1/gonum/graph/topo/bron_kerbosch.go | 250 + .../v1/gonum/graph/topo/clique_graph.go | 111 + vendor/gonum.org/v1/gonum/graph/topo/doc.go | 6 + .../v1/gonum/graph/topo/johnson_cycles.go | 285 + .../v1/gonum/graph/topo/non_tomita_choice.go | 9 + .../v1/gonum/graph/topo/paton_cycles.go | 83 + .../gonum.org/v1/gonum/graph/topo/tarjan.go | 199 + .../v1/gonum/graph/topo/tomita_choice.go | 9 + vendor/gonum.org/v1/gonum/graph/topo/topo.go | 68 + .../gonum.org/v1/gonum/graph/traverse/doc.go | 6 + .../v1/gonum/graph/traverse/traverse.go | 231 + vendor/gonum.org/v1/gonum/graph/undirect.go | 270 + .../gonum/internal/asm/c128/axpyinc_amd64.s | 134 + .../gonum/internal/asm/c128/axpyincto_amd64.s | 141 + .../internal/asm/c128/axpyunitary_amd64.s | 122 + .../internal/asm/c128/axpyunitaryto_amd64.s | 123 + .../v1/gonum/internal/asm/c128/doc.go | 6 + .../gonum/internal/asm/c128/dotcinc_amd64.s | 153 + .../internal/asm/c128/dotcunitary_amd64.s | 143 + .../gonum/internal/asm/c128/dotuinc_amd64.s | 141 + .../internal/asm/c128/dotuunitary_amd64.s | 130 + .../gonum/internal/asm/c128/dscalinc_amd64.s | 69 + 
.../internal/asm/c128/dscalunitary_amd64.s | 66 + .../v1/gonum/internal/asm/c128/scal.go | 31 + .../internal/asm/c128/scalUnitary_amd64.s | 116 + .../gonum/internal/asm/c128/scalinc_amd64.s | 121 + .../v1/gonum/internal/asm/c128/stubs_amd64.go | 96 + .../v1/gonum/internal/asm/c128/stubs_noasm.go | 163 + .../v1/gonum/internal/asm/c64/axpyinc_amd64.s | 151 + .../gonum/internal/asm/c64/axpyincto_amd64.s | 156 + .../internal/asm/c64/axpyunitary_amd64.s | 160 + .../internal/asm/c64/axpyunitaryto_amd64.s | 157 + .../v1/gonum/internal/asm/c64/conj.go | 7 + .../v1/gonum/internal/asm/c64/doc.go | 6 + .../v1/gonum/internal/asm/c64/dotcinc_amd64.s | 160 + .../internal/asm/c64/dotcunitary_amd64.s | 208 + .../v1/gonum/internal/asm/c64/dotuinc_amd64.s | 148 + .../internal/asm/c64/dotuunitary_amd64.s | 197 + .../v1/gonum/internal/asm/c64/scal.go | 79 + .../v1/gonum/internal/asm/c64/stubs_amd64.go | 68 + .../v1/gonum/internal/asm/c64/stubs_noasm.go | 113 + .../v1/gonum/internal/asm/f32/axpyinc_amd64.s | 73 + .../gonum/internal/asm/f32/axpyincto_amd64.s | 78 + .../internal/asm/f32/axpyunitary_amd64.s | 97 + .../internal/asm/f32/axpyunitaryto_amd64.s | 98 + .../v1/gonum/internal/asm/f32/ddotinc_amd64.s | 91 + .../internal/asm/f32/ddotunitary_amd64.s | 110 + .../v1/gonum/internal/asm/f32/doc.go | 6 + .../v1/gonum/internal/asm/f32/dotinc_amd64.s | 85 + .../gonum/internal/asm/f32/dotunitary_amd64.s | 106 + .../v1/gonum/internal/asm/f32/ge_amd64.go | 15 + .../v1/gonum/internal/asm/f32/ge_amd64.s | 757 + .../v1/gonum/internal/asm/f32/ge_noasm.go | 36 + .../v1/gonum/internal/asm/f32/scal.go | 55 + .../v1/gonum/internal/asm/f32/stubs_amd64.go | 68 + .../v1/gonum/internal/asm/f32/stubs_noasm.go | 113 + .../v1/gonum/internal/asm/f64/abssum_amd64.s | 82 + .../gonum/internal/asm/f64/abssuminc_amd64.s | 90 + .../v1/gonum/internal/asm/f64/add_amd64.s | 66 + .../gonum/internal/asm/f64/addconst_amd64.s | 53 + .../v1/gonum/internal/asm/f64/axpy.go | 57 + .../v1/gonum/internal/asm/f64/axpyinc_amd64.s | 142 + .../gonum/internal/asm/f64/axpyincto_amd64.s | 148 + .../internal/asm/f64/axpyunitary_amd64.s | 134 + .../internal/asm/f64/axpyunitaryto_amd64.s | 140 + .../v1/gonum/internal/asm/f64/cumprod_amd64.s | 71 + .../v1/gonum/internal/asm/f64/cumsum_amd64.s | 64 + .../v1/gonum/internal/asm/f64/div_amd64.s | 67 + .../v1/gonum/internal/asm/f64/divto_amd64.s | 73 + .../v1/gonum/internal/asm/f64/doc.go | 6 + .../v1/gonum/internal/asm/f64/dot.go | 35 + .../v1/gonum/internal/asm/f64/dot_amd64.s | 145 + .../v1/gonum/internal/asm/f64/ge_amd64.go | 22 + .../v1/gonum/internal/asm/f64/ge_noasm.go | 118 + .../v1/gonum/internal/asm/f64/gemvN_amd64.s | 685 + .../v1/gonum/internal/asm/f64/gemvT_amd64.s | 745 + .../v1/gonum/internal/asm/f64/ger_amd64.s | 591 + .../v1/gonum/internal/asm/f64/l1norm_amd64.s | 58 + .../gonum/internal/asm/f64/linfnorm_amd64.s | 57 + .../v1/gonum/internal/asm/f64/scal.go | 57 + .../v1/gonum/internal/asm/f64/scalinc_amd64.s | 113 + .../gonum/internal/asm/f64/scalincto_amd64.s | 122 + .../internal/asm/f64/scalunitary_amd64.s | 112 + .../internal/asm/f64/scalunitaryto_amd64.s | 113 + .../v1/gonum/internal/asm/f64/stubs_amd64.go | 172 + .../v1/gonum/internal/asm/f64/stubs_noasm.go | 170 + .../v1/gonum/internal/asm/f64/sum_amd64.s | 100 + .../v1/gonum/internal/cmplx64/abs.go | 14 + .../v1/gonum/internal/cmplx64/conj.go | 12 + .../v1/gonum/internal/cmplx64/doc.go | 7 + .../v1/gonum/internal/cmplx64/isinf.go | 25 + .../v1/gonum/internal/cmplx64/isnan.go | 29 + .../v1/gonum/internal/cmplx64/sqrt.go | 108 + 
.../gonum.org/v1/gonum/internal/math32/doc.go | 7 + .../v1/gonum/internal/math32/math.go | 111 + .../v1/gonum/internal/math32/signbit.go | 16 + .../v1/gonum/internal/math32/sqrt.go | 25 + .../v1/gonum/internal/math32/sqrt_amd64.go | 20 + .../v1/gonum/internal/math32/sqrt_amd64.s | 20 + vendor/gonum.org/v1/gonum/lapack/.gitignore | 0 vendor/gonum.org/v1/gonum/lapack/README.md | 28 + vendor/gonum.org/v1/gonum/lapack/doc.go | 6 + .../gonum.org/v1/gonum/lapack/gonum/dbdsqr.go | 505 + .../gonum.org/v1/gonum/lapack/gonum/dgebak.go | 89 + .../gonum.org/v1/gonum/lapack/gonum/dgebal.go | 239 + .../gonum.org/v1/gonum/lapack/gonum/dgebd2.go | 86 + .../gonum.org/v1/gonum/lapack/gonum/dgebrd.go | 161 + .../gonum.org/v1/gonum/lapack/gonum/dgecon.go | 92 + .../gonum.org/v1/gonum/lapack/gonum/dgeev.go | 279 + .../gonum.org/v1/gonum/lapack/gonum/dgehd2.go | 97 + .../gonum.org/v1/gonum/lapack/gonum/dgehrd.go | 194 + .../gonum.org/v1/gonum/lapack/gonum/dgelq2.go | 65 + .../gonum.org/v1/gonum/lapack/gonum/dgelqf.go | 97 + .../gonum.org/v1/gonum/lapack/gonum/dgels.go | 219 + .../gonum.org/v1/gonum/lapack/gonum/dgeql2.go | 61 + .../gonum.org/v1/gonum/lapack/gonum/dgeqp3.go | 186 + .../gonum.org/v1/gonum/lapack/gonum/dgeqr2.go | 76 + .../gonum.org/v1/gonum/lapack/gonum/dgeqrf.go | 108 + .../gonum.org/v1/gonum/lapack/gonum/dgerq2.go | 68 + .../gonum.org/v1/gonum/lapack/gonum/dgerqf.go | 129 + .../gonum.org/v1/gonum/lapack/gonum/dgesvd.go | 1374 + .../gonum.org/v1/gonum/lapack/gonum/dgetf2.go | 84 + .../gonum.org/v1/gonum/lapack/gonum/dgetrf.go | 85 + .../gonum.org/v1/gonum/lapack/gonum/dgetri.go | 116 + .../gonum.org/v1/gonum/lapack/gonum/dgetrs.go | 72 + .../v1/gonum/lapack/gonum/dggsvd3.go | 242 + .../v1/gonum/lapack/gonum/dggsvp3.go | 281 + .../gonum.org/v1/gonum/lapack/gonum/dhseqr.go | 252 + .../gonum.org/v1/gonum/lapack/gonum/dlabrd.go | 173 + .../gonum.org/v1/gonum/lapack/gonum/dlacn2.go | 134 + .../gonum.org/v1/gonum/lapack/gonum/dlacpy.go | 59 + .../gonum.org/v1/gonum/lapack/gonum/dlae2.go | 49 + .../gonum.org/v1/gonum/lapack/gonum/dlaev2.go | 82 + .../gonum.org/v1/gonum/lapack/gonum/dlaexc.go | 269 + .../gonum.org/v1/gonum/lapack/gonum/dlags2.go | 182 + .../gonum.org/v1/gonum/lapack/gonum/dlahqr.go | 431 + .../gonum.org/v1/gonum/lapack/gonum/dlahr2.go | 195 + .../gonum.org/v1/gonum/lapack/gonum/dlaln2.go | 405 + .../gonum.org/v1/gonum/lapack/gonum/dlange.go | 89 + .../gonum.org/v1/gonum/lapack/gonum/dlanst.go | 75 + .../gonum.org/v1/gonum/lapack/gonum/dlansy.go | 132 + .../gonum.org/v1/gonum/lapack/gonum/dlantr.go | 260 + .../gonum.org/v1/gonum/lapack/gonum/dlanv2.go | 132 + .../gonum.org/v1/gonum/lapack/gonum/dlapll.go | 55 + .../gonum.org/v1/gonum/lapack/gonum/dlapmt.go | 89 + .../gonum.org/v1/gonum/lapack/gonum/dlapy2.go | 14 + .../gonum.org/v1/gonum/lapack/gonum/dlaqp2.go | 127 + .../gonum.org/v1/gonum/lapack/gonum/dlaqps.go | 244 + .../v1/gonum/lapack/gonum/dlaqr04.go | 478 + .../gonum.org/v1/gonum/lapack/gonum/dlaqr1.go | 59 + .../v1/gonum/lapack/gonum/dlaqr23.go | 415 + .../gonum.org/v1/gonum/lapack/gonum/dlaqr5.go | 644 + .../gonum.org/v1/gonum/lapack/gonum/dlarf.go | 101 + .../gonum.org/v1/gonum/lapack/gonum/dlarfb.go | 449 + .../gonum.org/v1/gonum/lapack/gonum/dlarfg.go | 71 + .../gonum.org/v1/gonum/lapack/gonum/dlarft.go | 166 + .../gonum.org/v1/gonum/lapack/gonum/dlarfx.go | 550 + .../gonum.org/v1/gonum/lapack/gonum/dlartg.go | 80 + .../gonum.org/v1/gonum/lapack/gonum/dlas2.go | 43 + .../gonum.org/v1/gonum/lapack/gonum/dlascl.go | 111 + .../gonum.org/v1/gonum/lapack/gonum/dlaset.go | 57 + 
.../gonum.org/v1/gonum/lapack/gonum/dlasq1.go | 100 + .../gonum.org/v1/gonum/lapack/gonum/dlasq2.go | 369 + .../gonum.org/v1/gonum/lapack/gonum/dlasq3.go | 172 + .../gonum.org/v1/gonum/lapack/gonum/dlasq4.go | 249 + .../gonum.org/v1/gonum/lapack/gonum/dlasq5.go | 140 + .../gonum.org/v1/gonum/lapack/gonum/dlasq6.go | 118 + .../gonum.org/v1/gonum/lapack/gonum/dlasr.go | 279 + .../gonum.org/v1/gonum/lapack/gonum/dlasrt.go | 36 + .../gonum.org/v1/gonum/lapack/gonum/dlassq.go | 41 + .../gonum.org/v1/gonum/lapack/gonum/dlasv2.go | 115 + .../gonum.org/v1/gonum/lapack/gonum/dlaswp.go | 52 + .../gonum.org/v1/gonum/lapack/gonum/dlasy2.go | 290 + .../gonum.org/v1/gonum/lapack/gonum/dlatrd.go | 165 + .../gonum.org/v1/gonum/lapack/gonum/dlatrs.go | 359 + .../gonum.org/v1/gonum/lapack/gonum/dlauu2.go | 64 + .../gonum.org/v1/gonum/lapack/gonum/dlauum.go | 81 + vendor/gonum.org/v1/gonum/lapack/gonum/doc.go | 28 + .../gonum.org/v1/gonum/lapack/gonum/dorg2l.go | 76 + .../gonum.org/v1/gonum/lapack/gonum/dorg2r.go | 75 + .../gonum.org/v1/gonum/lapack/gonum/dorgbr.go | 138 + .../gonum.org/v1/gonum/lapack/gonum/dorghr.go | 101 + .../gonum.org/v1/gonum/lapack/gonum/dorgl2.go | 71 + .../gonum.org/v1/gonum/lapack/gonum/dorglq.go | 123 + .../gonum.org/v1/gonum/lapack/gonum/dorgql.go | 136 + .../gonum.org/v1/gonum/lapack/gonum/dorgqr.go | 134 + .../gonum.org/v1/gonum/lapack/gonum/dorgtr.go | 104 + .../gonum.org/v1/gonum/lapack/gonum/dorm2r.go | 101 + .../gonum.org/v1/gonum/lapack/gonum/dormbr.go | 178 + .../gonum.org/v1/gonum/lapack/gonum/dormhr.go | 129 + .../gonum.org/v1/gonum/lapack/gonum/dorml2.go | 102 + .../gonum.org/v1/gonum/lapack/gonum/dormlq.go | 174 + .../gonum.org/v1/gonum/lapack/gonum/dormqr.go | 177 + .../gonum.org/v1/gonum/lapack/gonum/dormr2.go | 103 + .../gonum.org/v1/gonum/lapack/gonum/dpbtf2.go | 110 + .../gonum.org/v1/gonum/lapack/gonum/dpocon.go | 90 + .../gonum.org/v1/gonum/lapack/gonum/dpotf2.go | 82 + .../gonum.org/v1/gonum/lapack/gonum/dpotrf.go | 81 + .../gonum.org/v1/gonum/lapack/gonum/dpotri.go | 44 + .../gonum.org/v1/gonum/lapack/gonum/dpotrs.go | 62 + .../gonum.org/v1/gonum/lapack/gonum/drscl.go | 63 + .../gonum.org/v1/gonum/lapack/gonum/dsteqr.go | 376 + .../gonum.org/v1/gonum/lapack/gonum/dsterf.go | 285 + .../gonum.org/v1/gonum/lapack/gonum/dsyev.go | 130 + .../gonum.org/v1/gonum/lapack/gonum/dsytd2.go | 136 + .../gonum.org/v1/gonum/lapack/gonum/dsytrd.go | 172 + .../gonum.org/v1/gonum/lapack/gonum/dtgsja.go | 373 + .../gonum.org/v1/gonum/lapack/gonum/dtrcon.go | 90 + .../v1/gonum/lapack/gonum/dtrevc3.go | 885 + .../gonum.org/v1/gonum/lapack/gonum/dtrexc.go | 230 + .../gonum.org/v1/gonum/lapack/gonum/dtrti2.go | 69 + .../gonum.org/v1/gonum/lapack/gonum/dtrtri.go | 72 + .../gonum.org/v1/gonum/lapack/gonum/dtrtrs.go | 55 + .../gonum.org/v1/gonum/lapack/gonum/errors.go | 174 + .../gonum.org/v1/gonum/lapack/gonum/iladlc.go | 45 + .../gonum.org/v1/gonum/lapack/gonum/iladlr.go | 41 + .../gonum.org/v1/gonum/lapack/gonum/ilaenv.go | 387 + .../gonum.org/v1/gonum/lapack/gonum/iparmq.go | 115 + .../gonum.org/v1/gonum/lapack/gonum/lapack.go | 51 + vendor/gonum.org/v1/gonum/lapack/lapack.go | 213 + .../gonum.org/v1/gonum/lapack/lapack64/doc.go | 20 + .../v1/gonum/lapack/lapack64/lapack64.go | 581 + vendor/gonum.org/v1/gonum/mat/README.md | 3 + vendor/gonum.org/v1/gonum/mat/band.go | 263 + vendor/gonum.org/v1/gonum/mat/cdense.go | 168 + vendor/gonum.org/v1/gonum/mat/cholesky.go | 673 + vendor/gonum.org/v1/gonum/mat/cmatrix.go | 210 + vendor/gonum.org/v1/gonum/mat/consts.go | 15 + 
vendor/gonum.org/v1/gonum/mat/dense.go | 558 + .../v1/gonum/mat/dense_arithmetic.go | 886 + vendor/gonum.org/v1/gonum/mat/diagonal.go | 311 + vendor/gonum.org/v1/gonum/mat/doc.go | 169 + vendor/gonum.org/v1/gonum/mat/eigen.go | 350 + vendor/gonum.org/v1/gonum/mat/errors.go | 149 + vendor/gonum.org/v1/gonum/mat/format.go | 238 + vendor/gonum.org/v1/gonum/mat/gsvd.go | 415 + vendor/gonum.org/v1/gonum/mat/hogsvd.go | 233 + .../v1/gonum/mat/index_bound_checks.go | 348 + .../v1/gonum/mat/index_no_bound_checks.go | 359 + vendor/gonum.org/v1/gonum/mat/inner.go | 121 + vendor/gonum.org/v1/gonum/mat/io.go | 492 + vendor/gonum.org/v1/gonum/mat/lq.go | 262 + vendor/gonum.org/v1/gonum/mat/lu.go | 422 + vendor/gonum.org/v1/gonum/mat/matrix.go | 946 + vendor/gonum.org/v1/gonum/mat/offset.go | 20 + .../v1/gonum/mat/offset_appengine.go | 24 + vendor/gonum.org/v1/gonum/mat/pool.go | 236 + vendor/gonum.org/v1/gonum/mat/product.go | 193 + vendor/gonum.org/v1/gonum/mat/qr.go | 260 + vendor/gonum.org/v1/gonum/mat/shadow.go | 226 + vendor/gonum.org/v1/gonum/mat/solve.go | 140 + vendor/gonum.org/v1/gonum/mat/svd.go | 247 + vendor/gonum.org/v1/gonum/mat/symband.go | 221 + vendor/gonum.org/v1/gonum/mat/symmetric.go | 602 + vendor/gonum.org/v1/gonum/mat/triangular.go | 659 + vendor/gonum.org/v1/gonum/mat/triband.go | 353 + vendor/gonum.org/v1/gonum/mat/vector.go | 741 + vendor/gopkg.in/yaml.v2/.travis.yml | 18 +- vendor/gopkg.in/yaml.v2/decode.go | 48 +- vendor/gopkg.in/yaml.v2/resolve.go | 2 +- vendor/gopkg.in/yaml.v2/scannerc.go | 123 +- vendor/gopkg.in/yaml.v2/yaml.go | 2 +- vendor/gopkg.in/yaml.v2/yamlh.go | 1 + .../api/admissionregistration/v1/doc.go | 26 + .../admissionregistration/v1/generated.pb.go | 3469 ++ .../admissionregistration/v1/generated.proto | 479 + .../api/admissionregistration/v1/register.go | 53 + .../api/admissionregistration/v1/types.go | 551 + .../v1/types_swagger_doc_generated.go | 151 + .../v1/zz_generated.deepcopy.go | 396 + .../v1beta1/generated.pb.go | 2849 +- .../v1beta1/generated.proto | 247 +- .../admissionregistration/v1beta1/types.go | 230 +- .../v1beta1/types_swagger_doc_generated.go | 68 +- .../v1beta1/zz_generated.deepcopy.go | 185 +- vendor/k8s.io/api/apps/v1/generated.pb.go | 3620 +- vendor/k8s.io/api/apps/v1/generated.proto | 20 +- vendor/k8s.io/api/apps/v1/types.go | 24 +- .../apps/v1/types_swagger_doc_generated.go | 20 +- .../api/apps/v1/zz_generated.deepcopy.go | 10 +- .../k8s.io/api/apps/v1beta1/generated.pb.go | 2715 +- .../k8s.io/api/apps/v1beta1/generated.proto | 10 +- vendor/k8s.io/api/apps/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 10 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 6 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 3923 +- vendor/k8s.io/api/apps/v1beta2/types.go | 4 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 10 +- .../v1alpha1/generated.pb.go | 911 +- .../v1alpha1/generated.proto | 8 +- .../api/auditregistration/v1alpha1/types.go | 8 +- .../v1alpha1/types_swagger_doc_generated.go | 3 +- .../v1alpha1/zz_generated.deepcopy.go | 7 +- .../api/authentication/v1/generated.pb.go | 1156 +- .../authentication/v1beta1/generated.pb.go | 708 +- .../api/authorization/v1/generated.pb.go | 1794 +- .../api/authorization/v1beta1/generated.pb.go | 1806 +- .../k8s.io/api/autoscaling/v1/generated.pb.go | 2503 +- .../k8s.io/api/autoscaling/v1/generated.proto | 18 +- vendor/k8s.io/api/autoscaling/v1/types.go | 20 +- .../v1/types_swagger_doc_generated.go | 14 +- .../autoscaling/v1/zz_generated.deepcopy.go | 2 +- 
.../api/autoscaling/v2beta1/generated.pb.go | 2270 +- .../api/autoscaling/v2beta1/generated.proto | 13 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 15 +- .../v2beta1/types_swagger_doc_generated.go | 8 +- .../v2beta1/zz_generated.deepcopy.go | 2 +- .../api/autoscaling/v2beta2/generated.pb.go | 2355 +- .../api/autoscaling/v2beta2/generated.proto | 13 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 15 +- .../v2beta2/types_swagger_doc_generated.go | 8 +- .../v2beta2/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v1/generated.pb.go | 863 +- vendor/k8s.io/api/batch/v1/generated.proto | 8 +- vendor/k8s.io/api/batch/v1/types.go | 8 +- .../batch/v1/types_swagger_doc_generated.go | 8 +- .../api/batch/v1/zz_generated.deepcopy.go | 2 +- .../k8s.io/api/batch/v1beta1/generated.pb.go | 817 +- .../k8s.io/api/batch/v1beta1/generated.proto | 16 +- vendor/k8s.io/api/batch/v1beta1/types.go | 16 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 2 +- .../k8s.io/api/batch/v2alpha1/generated.pb.go | 817 +- .../k8s.io/api/batch/v2alpha1/generated.proto | 16 +- vendor/k8s.io/api/batch/v2alpha1/types.go | 16 +- .../v2alpha1/types_swagger_doc_generated.go | 16 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 2 +- .../api/certificates/v1beta1/generated.pb.go | 852 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- .../api/coordination/v1/generated.pb.go | 468 +- .../api/coordination/v1/generated.proto | 6 +- vendor/k8s.io/api/coordination/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 6 +- .../coordination/v1/zz_generated.deepcopy.go | 2 +- .../api/coordination/v1beta1/generated.pb.go | 468 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/core/v1/generated.pb.go | 50079 ++++++++++------ vendor/k8s.io/api/core/v1/generated.proto | 647 +- vendor/k8s.io/api/core/v1/register.go | 1 + vendor/k8s.io/api/core/v1/taint.go | 8 +- vendor/k8s.io/api/core/v1/types.go | 686 +- .../core/v1/types_swagger_doc_generated.go | 360 +- .../k8s.io/api/core/v1/well_known_labels.go | 6 + .../api/core/v1/zz_generated.deepcopy.go | 334 +- vendor/k8s.io/api/discovery/v1alpha1/doc.go | 22 + .../api/discovery/v1alpha1/generated.pb.go | 1689 + .../api/discovery/v1alpha1/generated.proto | 148 + .../k8s.io/api/discovery/v1alpha1/register.go | 56 + vendor/k8s.io/api/discovery/v1alpha1/types.go | 144 + .../v1alpha1/types_swagger_doc_generated.go | 85 + .../discovery/v1alpha1/well_known_labels.go | 22 + .../v1alpha1/zz_generated.deepcopy.go | 195 + .../k8s.io/api/events/v1beta1/generated.pb.go | 687 +- .../k8s.io/api/events/v1beta1/generated.proto | 3 +- vendor/k8s.io/api/events/v1beta1/types.go | 7 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../events/v1beta1/zz_generated.deepcopy.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 7013 ++- .../api/extensions/v1beta1/generated.proto | 25 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 29 +- .../v1beta1/types_swagger_doc_generated.go | 15 +- .../v1beta1/zz_generated.deepcopy.go | 43 +- .../k8s.io/api/networking/v1/generated.pb.go | 1055 +- .../k8s.io/api/networking/v1/generated.proto | 2 +- vendor/k8s.io/api/networking/v1/types.go | 2 +- .../v1/types_swagger_doc_generated.go | 2 +- .../networking/v1/zz_generated.deepcopy.go | 2 +- .../api/networking/v1beta1/generated.pb.go | 1085 +- .../api/networking/v1beta1/generated.proto | 8 +- 
vendor/k8s.io/api/networking/v1beta1/types.go | 8 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/node/v1alpha1/doc.go | 1 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 1177 +- .../k8s.io/api/node/v1alpha1/generated.proto | 48 +- vendor/k8s.io/api/node/v1alpha1/types.go | 47 +- .../v1alpha1/types_swagger_doc_generated.go | 27 +- .../node/v1alpha1/zz_generated.deepcopy.go | 68 +- vendor/k8s.io/api/node/v1beta1/doc.go | 1 + .../k8s.io/api/node/v1beta1/generated.pb.go | 1078 +- .../k8s.io/api/node/v1beta1/generated.proto | 46 +- vendor/k8s.io/api/node/v1beta1/types.go | 45 +- .../v1beta1/types_swagger_doc_generated.go | 29 +- .../api/node/v1beta1/zz_generated.deepcopy.go | 66 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 2738 +- .../k8s.io/api/policy/v1beta1/generated.proto | 28 +- vendor/k8s.io/api/policy/v1beta1/types.go | 33 +- .../v1beta1/types_swagger_doc_generated.go | 17 +- .../policy/v1beta1/zz_generated.deepcopy.go | 35 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 1511 +- .../api/rbac/v1/zz_generated.deepcopy.go | 8 +- .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 1511 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 8 +- .../k8s.io/api/rbac/v1beta1/generated.pb.go | 1511 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 8 +- .../k8s.io/api/scheduling/v1/generated.pb.go | 374 +- .../k8s.io/api/scheduling/v1/generated.proto | 12 +- vendor/k8s.io/api/scheduling/v1/types.go | 12 +- .../v1/types_swagger_doc_generated.go | 13 +- .../scheduling/v1/zz_generated.deepcopy.go | 8 +- .../api/scheduling/v1alpha1/generated.pb.go | 374 +- .../api/scheduling/v1alpha1/generated.proto | 12 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 12 +- .../v1alpha1/types_swagger_doc_generated.go | 13 +- .../v1alpha1/zz_generated.deepcopy.go | 8 +- .../api/scheduling/v1beta1/generated.pb.go | 374 +- .../api/scheduling/v1beta1/generated.proto | 8 + vendor/k8s.io/api/scheduling/v1beta1/types.go | 8 + .../v1beta1/types_swagger_doc_generated.go | 11 +- .../v1beta1/zz_generated.deepcopy.go | 8 +- .../api/settings/v1alpha1/generated.pb.go | 537 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 1237 +- vendor/k8s.io/api/storage/v1/generated.proto | 17 +- vendor/k8s.io/api/storage/v1/types.go | 17 +- .../storage/v1/types_swagger_doc_generated.go | 8 +- .../api/storage/v1/zz_generated.deepcopy.go | 9 +- .../api/storage/v1alpha1/generated.pb.go | 840 +- .../api/storage/v1alpha1/generated.proto | 14 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 18 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 8 +- .../api/storage/v1beta1/generated.pb.go | 2171 +- .../api/storage/v1beta1/generated.proto | 61 +- vendor/k8s.io/api/storage/v1beta1/types.go | 88 +- .../v1beta1/types_swagger_doc_generated.go | 31 +- .../storage/v1beta1/zz_generated.deepcopy.go | 44 +- .../pkg/apis/apiextensions/deepcopy.go | 26 + .../pkg/apis/apiextensions/helpers.go | 7 +- .../pkg/apis/apiextensions/types.go | 44 +- .../apis/apiextensions/types_jsonschema.go | 54 + .../pkg/apis/apiextensions/v1/conversion.go | 212 + .../pkg/apis/apiextensions/v1/deepcopy.go | 248 + .../pkg/apis/apiextensions/v1/defaults.go | 61 + .../pkg/apis/apiextensions/v1/doc.go | 25 + .../pkg/apis/apiextensions/v1/generated.pb.go | 8966 +++ .../pkg/apis/apiextensions/v1/generated.proto | 593 + .../pkg/apis/apiextensions/v1/marshal.go | 135 + .../pkg/apis/apiextensions/v1/register.go | 62 + 
.../pkg/apis/apiextensions/v1/types.go | 463 + .../apis/apiextensions/v1/types_jsonschema.go | 214 + .../v1/zz_generated.conversion.go | 1307 + .../apiextensions/v1/zz_generated.deepcopy.go | 663 + .../apiextensions/v1/zz_generated.defaults.go | 57 + .../apis/apiextensions/v1beta1/deepcopy.go | 26 + .../apis/apiextensions/v1beta1/defaults.go | 20 +- .../apiextensions/v1beta1/generated.pb.go | 4153 +- .../apiextensions/v1beta1/generated.proto | 312 +- .../pkg/apis/apiextensions/v1beta1/types.go | 276 +- .../apiextensions/v1beta1/types_jsonschema.go | 77 +- .../v1beta1/zz_generated.conversion.go | 79 +- .../v1beta1/zz_generated.deepcopy.go | 12 +- .../v1beta1/zz_generated.defaults.go | 7 + .../apiextensions/zz_generated.deepcopy.go | 7 +- .../client/clientset/clientset/clientset.go | 21 + .../clientset/clientset/scheme/register.go | 2 + .../apiextensions/v1/apiextensions_client.go | 89 + .../v1/customresourcedefinition.go | 180 + .../clientset/typed/apiextensions/v1/doc.go | 20 + .../apiextensions/v1/generated_expansion.go | 21 + .../v1beta1/apiextensions_client.go | 3 +- .../apimachinery/pkg/api/equality/semantic.go | 49 + .../apimachinery/pkg/api/errors/errors.go | 55 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 66 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 8 +- .../pkg/api/resource/generated.pb.go | 46 +- .../pkg/api/resource/generated.proto | 2 +- .../apimachinery/pkg/api/resource/math.go | 4 +- .../apimachinery/pkg/api/resource/quantity.go | 30 +- .../pkg/api/resource/quantity_proto.go | 34 +- .../apis/meta/internalversion/conversion.go | 52 - .../pkg/apis/meta/internalversion/register.go | 15 +- .../pkg/apis/meta/internalversion/types.go | 9 + .../zz_generated.conversion.go | 22 +- .../internalversion/zz_generated.deepcopy.go | 2 +- .../pkg/apis/meta/v1/conversion.go | 10 + .../apimachinery/pkg/apis/meta/v1/deepcopy.go | 46 + .../pkg/apis/meta/v1/generated.pb.go | 6013 +- .../pkg/apis/meta/v1/generated.proto | 163 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 29 +- .../apimachinery/pkg/apis/meta/v1/meta.go | 10 +- .../pkg/apis/meta/v1/micro_time.go | 31 +- .../pkg/apis/meta/v1/micro_time_proto.go | 8 + .../apimachinery/pkg/apis/meta/v1/register.go | 17 + .../apimachinery/pkg/apis/meta/v1/time.go | 20 +- .../pkg/apis/meta/v1/time_proto.go | 12 +- .../apimachinery/pkg/apis/meta/v1/types.go | 296 +- .../meta/v1/types_swagger_doc_generated.go | 158 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 22 + .../apis/meta/v1/unstructured/unstructured.go | 48 +- .../meta/v1/unstructured/unstructured_list.go | 22 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 241 +- .../pkg/apis/meta/v1beta1/deepcopy.go | 27 - .../pkg/apis/meta/v1beta1/generated.pb.go | 427 +- .../pkg/apis/meta/v1beta1/generated.proto | 27 +- .../pkg/apis/meta/v1beta1/register.go | 12 +- .../pkg/apis/meta/v1beta1/types.go | 143 +- .../v1beta1/types_swagger_doc_generated.go | 70 +- .../meta/v1beta1/zz_generated.deepcopy.go | 138 +- .../pkg/conversion/queryparams/convert.go | 4 - .../k8s.io/apimachinery/pkg/labels/labels.go | 2 +- .../apimachinery/pkg/labels/selector.go | 6 +- .../apimachinery/pkg/runtime/converter.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 29 + .../apimachinery/pkg/runtime/generated.pb.go | 360 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 53 +- .../apimachinery/pkg/runtime/interfaces.go | 26 + .../k8s.io/apimachinery/pkg/runtime/mapper.go | 98 + .../pkg/runtime/schema/generated.pb.go | 20 +- .../pkg/runtime/serializer/codec_factory.go | 141 +- 
.../pkg/runtime/serializer/json/json.go | 115 +- .../runtime/serializer/protobuf/protobuf.go | 58 +- .../runtime/serializer/protobuf_extension.go | 48 - .../serializer/versioning/versioning.go | 86 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 7 +- .../apimachinery/pkg/runtime/types_proto.go | 90 +- .../apimachinery/pkg/util/clock/clock.go | 90 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 229 +- .../pkg/util/intstr/generated.pb.go | 174 +- .../apimachinery/pkg/util/intstr/intstr.go | 6 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 53 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 28 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 17 + .../apimachinery/pkg/util/runtime/runtime.go | 50 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 6 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 6 +- .../apimachinery/pkg/util/sets/int32.go | 205 + .../apimachinery/pkg/util/sets/int64.go | 6 +- .../apimachinery/pkg/util/sets/string.go | 6 +- .../k8s.io/apimachinery/pkg/util/uuid/uuid.go | 20 +- .../pkg/util/validation/field/errors.go | 13 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 71 +- .../apimachinery/pkg/watch/streamwatcher.go | 33 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 5 + vendor/k8s.io/client-go/discovery/doc.go | 2 +- .../admissionregistration/interface.go | 8 + .../admissionregistration/v1/interface.go | 52 + .../v1/mutatingwebhookconfiguration.go | 88 + .../v1/validatingwebhookconfiguration.go | 88 + .../informers/discovery/interface.go | 46 + .../discovery/v1alpha1/endpointslice.go | 89 + .../informers/discovery/v1alpha1/interface.go | 45 + .../informers/extensions/v1beta1/interface.go | 7 + .../extensions/v1beta1/networkpolicy.go | 89 + vendor/k8s.io/client-go/informers/factory.go | 6 + vendor/k8s.io/client-go/informers/generic.go | 28 +- .../k8s.io/client-go/kubernetes/clientset.go | 35 + .../client-go/kubernetes/scheme/register.go | 4 + .../v1/admissionregistration_client.go | 94 + .../typed/admissionregistration/v1/doc.go | 20 + .../v1/generated_expansion.go | 23 + .../v1/mutatingwebhookconfiguration.go | 164 + .../v1/validatingwebhookconfiguration.go | 164 + .../v1beta1/admissionregistration_client.go | 3 +- .../kubernetes/typed/apps/v1/apps_client.go | 3 +- .../typed/apps/v1beta1/apps_client.go | 3 +- .../typed/apps/v1beta2/apps_client.go | 3 +- .../v1alpha1/auditregistration_client.go | 3 +- .../v1/authentication_client.go | 3 +- .../v1beta1/authentication_client.go | 3 +- .../authorization/v1/authorization_client.go | 3 +- .../v1beta1/authorization_client.go | 3 +- .../autoscaling/v1/autoscaling_client.go | 3 +- .../autoscaling/v2beta1/autoscaling_client.go | 3 +- .../autoscaling/v2beta2/autoscaling_client.go | 3 +- .../kubernetes/typed/batch/v1/batch_client.go | 3 +- .../typed/batch/v1beta1/batch_client.go | 3 +- .../typed/batch/v2alpha1/batch_client.go | 3 +- .../v1beta1/certificates_client.go | 3 +- .../coordination/v1/coordination_client.go | 3 +- .../v1beta1/coordination_client.go | 3 +- .../kubernetes/typed/core/v1/core_client.go | 3 +- .../client-go/kubernetes/typed/core/v1/pod.go | 31 + .../discovery/v1alpha1/discovery_client.go | 89 + .../typed/discovery/v1alpha1/doc.go | 20 + .../typed/discovery/v1alpha1/endpointslice.go | 174 + .../discovery/v1alpha1/generated_expansion.go | 21 + .../typed/events/v1beta1/event_expansion.go | 98 + .../typed/events/v1beta1/events_client.go | 3 +- .../events/v1beta1/generated_expansion.go | 2 - .../extensions/v1beta1/extensions_client.go | 8 +- .../extensions/v1beta1/generated_expansion.go | 2 + 
.../typed/extensions/v1beta1/networkpolicy.go | 174 + .../typed/networking/v1/networking_client.go | 3 +- .../networking/v1beta1/networking_client.go | 3 +- .../typed/node/v1alpha1/node_client.go | 3 +- .../typed/node/v1beta1/node_client.go | 3 +- .../typed/policy/v1beta1/policy_client.go | 3 +- .../kubernetes/typed/rbac/v1/rbac_client.go | 3 +- .../typed/rbac/v1alpha1/rbac_client.go | 3 +- .../typed/rbac/v1beta1/rbac_client.go | 3 +- .../typed/scheduling/v1/scheduling_client.go | 3 +- .../scheduling/v1alpha1/scheduling_client.go | 3 +- .../scheduling/v1beta1/scheduling_client.go | 3 +- .../settings/v1alpha1/settings_client.go | 3 +- .../typed/storage/v1/storage_client.go | 3 +- .../typed/storage/v1alpha1/storage_client.go | 3 +- .../typed/storage/v1beta1/storage_client.go | 3 +- .../v1/expansion_generated.go | 27 + .../v1/mutatingwebhookconfiguration.go | 65 + .../v1/validatingwebhookconfiguration.go | 65 + .../discovery/v1alpha1/endpointslice.go | 94 + .../discovery/v1alpha1/expansion_generated.go | 27 + .../extensions/v1beta1/expansion_generated.go | 8 + .../extensions/v1beta1/networkpolicy.go | 94 + vendor/k8s.io/client-go/rest/config.go | 47 +- vendor/k8s.io/client-go/rest/plugin.go | 4 +- vendor/k8s.io/client-go/rest/request.go | 50 +- vendor/k8s.io/client-go/rest/transport.go | 15 +- vendor/k8s.io/client-go/rest/watch/decoder.go | 2 +- .../client-go/rest/zz_generated.deepcopy.go | 5 + vendor/k8s.io/client-go/testing/actions.go | 4 +- vendor/k8s.io/client-go/testing/fixture.go | 13 + .../k8s.io/client-go/tools/auth/clientauth.go | 3 +- .../client-go/tools/cache/controller.go | 5 +- .../client-go/tools/cache/delta_fifo.go | 21 +- .../client-go/tools/cache/expiration_cache.go | 49 +- .../tools/cache/expiration_cache_fakes.go | 5 +- .../tools/cache/fake_custom_store.go | 4 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 11 +- vendor/k8s.io/client-go/tools/cache/heap.go | 10 +- vendor/k8s.io/client-go/tools/cache/index.go | 27 +- .../k8s.io/client-go/tools/cache/listers.go | 3 + .../client-go/tools/cache/mutation_cache.go | 1 + .../tools/cache/mutation_detector.go | 8 +- .../k8s.io/client-go/tools/cache/reflector.go | 62 +- .../tools/cache/reflector_metrics.go | 17 - .../client-go/tools/cache/shared_informer.go | 142 +- .../tools/cache/thread_safe_store.go | 10 +- .../client-go/tools/cache/undelta_store.go | 8 +- .../tools/clientcmd/client_config.go | 16 +- .../client-go/tools/clientcmd/loader.go | 28 +- .../client-go/tools/clientcmd/validation.go | 11 +- vendor/k8s.io/client-go/tools/pager/pager.go | 114 + .../k8s.io/client-go/tools/reference/ref.go | 53 +- vendor/k8s.io/client-go/transport/cache.go | 36 +- vendor/k8s.io/client-go/transport/config.go | 10 + .../client-go/transport/round_trippers.go | 69 +- .../client-go/transport/token_source.go | 18 + .../k8s.io/client-go/transport/transport.go | 20 +- .../util/connrotation/connrotation.go | 10 +- .../client-go/util/flowcontrol/backoff.go | 16 +- .../client-go/util/flowcontrol/throttle.go | 16 + .../k8s.io/client-go/util/homedir/homedir.go | 71 +- vendor/k8s.io/client-go/util/retry/util.go | 15 +- .../util/workqueue/delaying_queue.go | 31 +- vendor/k8s.io/client-go/util/workqueue/doc.go | 2 +- .../client-go/util/workqueue/metrics.go | 84 +- .../client-go/util/workqueue/parallelizer.go | 8 - vendor/k8s.io/code-generator/CONTRIBUTING.md | 7 + vendor/k8s.io/code-generator/LICENSE | 202 + vendor/k8s.io/code-generator/OWNERS | 13 + vendor/k8s.io/code-generator/README.md | 24 + .../k8s.io/code-generator/SECURITY_CONTACTS | 17 + 
.../code-generator/cmd/client-gen/OWNERS | 10 + .../code-generator/cmd/client-gen/README.md | 4 + .../cmd/client-gen/args/args.go | 120 + .../cmd/client-gen/args/gvpackages.go | 183 + .../cmd/client-gen/args/gvtype.go | 110 + .../client-gen/generators/client_generator.go | 403 + .../generators/fake/fake_client_generator.go | 130 + .../fake/generator_fake_for_clientset.go | 174 + .../fake/generator_fake_for_group.go | 130 + .../fake/generator_fake_for_type.go | 479 + .../generators/generator_for_clientset.go | 183 + .../generators/generator_for_expansion.go | 54 + .../generators/generator_for_group.go | 246 + .../generators/generator_for_type.go | 599 + .../generators/scheme/generator_for_scheme.go | 186 + .../cmd/client-gen/generators/util/tags.go | 341 + .../code-generator/cmd/client-gen/main.go | 66 + .../cmd/client-gen/path/path.go | 31 + .../cmd/client-gen/types/helpers.go | 121 + .../cmd/client-gen/types/types.go | 75 + .../cmd/conversion-gen/args/args.go | 83 + .../conversion-gen/generators/conversion.go | 984 + .../code-generator/cmd/conversion-gen/main.go | 116 + .../cmd/deepcopy-gen/args/args.go | 54 + .../code-generator/cmd/deepcopy-gen/main.go | 85 + .../cmd/defaulter-gen/args/args.go | 54 + .../code-generator/cmd/defaulter-gen/main.go | 84 + .../cmd/go-to-protobuf/.gitignore | 1 + .../code-generator/cmd/go-to-protobuf/OWNERS | 6 + .../code-generator/cmd/go-to-protobuf/main.go | 39 + .../cmd/go-to-protobuf/protobuf/cmd.go | 428 + .../cmd/go-to-protobuf/protobuf/generator.go | 773 + .../go-to-protobuf/protobuf/import_tracker.go | 50 + .../cmd/go-to-protobuf/protobuf/namer.go | 208 + .../cmd/go-to-protobuf/protobuf/package.go | 215 + .../cmd/go-to-protobuf/protobuf/parser.go | 452 + .../cmd/go-to-protobuf/protobuf/tags.go | 33 + .../code-generator/cmd/import-boss/.gitignore | 1 + .../code-generator/cmd/import-boss/main.go | 96 + .../cmd/informer-gen/args/args.go | 77 + .../cmd/informer-gen/generators/factory.go | 258 + .../generators/factoryinterface.go | 90 + .../cmd/informer-gen/generators/generic.go | 184 + .../informer-gen/generators/groupinterface.go | 118 + .../cmd/informer-gen/generators/informer.go | 186 + .../cmd/informer-gen/generators/packages.go | 352 + .../cmd/informer-gen/generators/types.go | 42 + .../generators/versioninterface.go | 109 + .../code-generator/cmd/informer-gen/main.go | 63 + .../cmd/lister-gen/.import-restrictions | 1 + .../cmd/lister-gen/args/args.go | 56 + .../cmd/lister-gen/generators/expansion.go | 67 + .../cmd/lister-gen/generators/lister.go | 371 + .../code-generator/cmd/lister-gen/main.go | 60 + .../code-generator/cmd/openapi-gen/main.go | 57 + .../cmd/register-gen/args/args.go | 39 + .../cmd/register-gen/generators/packages.go | 137 + .../generators/register_external.go | 117 + .../code-generator/cmd/register-gen/main.go | 53 + .../code-generator/cmd/set-gen/.gitignore | 1 + .../k8s.io/code-generator/cmd/set-gen/main.go | 56 + .../k8s.io/code-generator/code-of-conduct.md | 3 + .../k8s.io/code-generator/generate-groups.sh | 92 + .../generate-internal-groups.sh | 122 + vendor/k8s.io/code-generator/go.mod | 25 + vendor/k8s.io/code-generator/go.sum | 132 + .../code-generator/pkg/namer/tag-override.go | 58 + .../k8s.io/code-generator/pkg/util/build.go | 61 + .../third_party/forked/golang/reflect/type.go | 91 + vendor/k8s.io/code-generator/tools.go | 35 + vendor/k8s.io/gengo/LICENSE | 202 + vendor/k8s.io/gengo/args/args.go | 209 + .../deepcopy-gen/generators/deepcopy.go | 924 + .../defaulter-gen/generators/defaulter.go | 828 + 
.../import-boss/generators/import_restrict.go | 290 + .../gengo/examples/set-gen/generators/sets.go | 362 + .../gengo/examples/set-gen/generators/tags.go | 33 + .../gengo/examples/set-gen/sets/byte.go | 205 + .../k8s.io/gengo/examples/set-gen/sets/doc.go | 20 + .../gengo/examples/set-gen/sets/empty.go | 23 + .../k8s.io/gengo/examples/set-gen/sets/int.go | 205 + .../gengo/examples/set-gen/sets/int64.go | 205 + .../gengo/examples/set-gen/sets/string.go | 205 + .../gengo/generator/default_generator.go | 62 + .../k8s.io/gengo/generator/default_package.go | 72 + vendor/k8s.io/gengo/generator/doc.go | 31 + .../k8s.io/gengo/generator/error_tracker.go | 50 + vendor/k8s.io/gengo/generator/execute.go | 312 + vendor/k8s.io/gengo/generator/generator.go | 219 + .../k8s.io/gengo/generator/import_tracker.go | 64 + .../k8s.io/gengo/generator/snippet_writer.go | 154 + vendor/k8s.io/gengo/namer/doc.go | 31 + vendor/k8s.io/gengo/namer/import_tracker.go | 112 + vendor/k8s.io/gengo/namer/namer.go | 383 + vendor/k8s.io/gengo/namer/order.go | 69 + vendor/k8s.io/gengo/namer/plural_namer.go | 120 + vendor/k8s.io/gengo/parser/doc.go | 19 + vendor/k8s.io/gengo/parser/parse.go | 822 + vendor/k8s.io/gengo/types/comments.go | 82 + vendor/k8s.io/gengo/types/doc.go | 19 + vendor/k8s.io/gengo/types/flatten.go | 57 + vendor/k8s.io/gengo/types/types.go | 499 + vendor/k8s.io/klog/.travis.yml | 3 +- vendor/k8s.io/klog/README.md | 2 +- vendor/k8s.io/klog/go.mod | 5 + vendor/k8s.io/klog/go.sum | 2 + vendor/k8s.io/klog/klog.go | 249 +- .../kube-openapi/cmd/openapi-gen/args/args.go | 76 + .../k8s.io/kube-openapi/pkg/common/common.go | 192 + vendor/k8s.io/kube-openapi/pkg/common/doc.go | 19 + .../kube-openapi/pkg/generators/README.md | 49 + .../kube-openapi/pkg/generators/api_linter.go | 220 + .../kube-openapi/pkg/generators/config.go | 91 + .../kube-openapi/pkg/generators/extension.go | 182 + .../kube-openapi/pkg/generators/openapi.go | 680 + .../kube-openapi/pkg/generators/rules/OWNERS | 4 + .../kube-openapi/pkg/generators/rules/doc.go | 23 + .../pkg/generators/rules/idl_tag.go | 36 + .../pkg/generators/rules/names_match.go | 172 + .../generators/rules/omitempty_match_case.go | 64 + .../kube-openapi/pkg/generators/union.go | 207 + .../kube-openapi/pkg/util/sets/empty.go | 27 + .../kube-openapi/pkg/util/sets/string.go | 207 + vendor/k8s.io/utils/pointer/OWNERS | 10 + vendor/k8s.io/utils/pointer/pointer.go | 86 + vendor/k8s.io/utils/trace/trace.go | 52 +- vendor/modules.txt | 164 +- vendor/sigs.k8s.io/yaml/.travis.yml | 15 +- vendor/sigs.k8s.io/yaml/OWNERS | 2 + vendor/sigs.k8s.io/yaml/README.md | 14 +- vendor/sigs.k8s.io/yaml/go.mod | 8 + vendor/sigs.k8s.io/yaml/go.sum | 9 + vendor/sigs.k8s.io/yaml/yaml.go | 61 + 1270 files changed, 254395 insertions(+), 44220 deletions(-) create mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore create mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml create mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE create mode 100644 vendor/github.com/PuerkitoBio/purell/README.md create mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go create mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml rename vendor/github.com/{pborman/uuid => PuerkitoBio/urlesc}/LICENSE (96%) create mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md create mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go create mode 100644 vendor/github.com/emicklei/go-restful/.gitignore create mode 100644 vendor/github.com/emicklei/go-restful/.travis.yml create mode 100644 
vendor/github.com/emicklei/go-restful/CHANGES.md create mode 100644 vendor/github.com/emicklei/go-restful/LICENSE create mode 100644 vendor/github.com/emicklei/go-restful/Makefile create mode 100644 vendor/github.com/emicklei/go-restful/README.md create mode 100644 vendor/github.com/emicklei/go-restful/Srcfile create mode 100644 vendor/github.com/emicklei/go-restful/bench_test.sh create mode 100644 vendor/github.com/emicklei/go-restful/compress.go create mode 100644 vendor/github.com/emicklei/go-restful/compressor_cache.go create mode 100644 vendor/github.com/emicklei/go-restful/compressor_pools.go create mode 100644 vendor/github.com/emicklei/go-restful/compressors.go create mode 100644 vendor/github.com/emicklei/go-restful/constants.go create mode 100644 vendor/github.com/emicklei/go-restful/container.go create mode 100644 vendor/github.com/emicklei/go-restful/cors_filter.go create mode 100644 vendor/github.com/emicklei/go-restful/coverage.sh create mode 100644 vendor/github.com/emicklei/go-restful/curly.go create mode 100644 vendor/github.com/emicklei/go-restful/curly_route.go create mode 100644 vendor/github.com/emicklei/go-restful/doc.go create mode 100644 vendor/github.com/emicklei/go-restful/entity_accessors.go create mode 100644 vendor/github.com/emicklei/go-restful/filter.go create mode 100644 vendor/github.com/emicklei/go-restful/json.go create mode 100644 vendor/github.com/emicklei/go-restful/jsoniter.go create mode 100644 vendor/github.com/emicklei/go-restful/jsr311.go create mode 100644 vendor/github.com/emicklei/go-restful/log/log.go create mode 100644 vendor/github.com/emicklei/go-restful/logger.go create mode 100644 vendor/github.com/emicklei/go-restful/mime.go create mode 100644 vendor/github.com/emicklei/go-restful/options_filter.go create mode 100644 vendor/github.com/emicklei/go-restful/parameter.go create mode 100644 vendor/github.com/emicklei/go-restful/path_expression.go create mode 100644 vendor/github.com/emicklei/go-restful/path_processor.go create mode 100644 vendor/github.com/emicklei/go-restful/request.go create mode 100644 vendor/github.com/emicklei/go-restful/response.go create mode 100644 vendor/github.com/emicklei/go-restful/route.go create mode 100644 vendor/github.com/emicklei/go-restful/route_builder.go create mode 100644 vendor/github.com/emicklei/go-restful/router.go create mode 100644 vendor/github.com/emicklei/go-restful/service_error.go create mode 100644 vendor/github.com/emicklei/go-restful/web_service.go create mode 100644 vendor/github.com/emicklei/go-restful/web_service_container.go create mode 100644 vendor/github.com/go-openapi/jsonpointer/.editorconfig create mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore create mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml create mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE create mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md create mode 100644 vendor/github.com/go-openapi/jsonpointer/go.mod create mode 100644 vendor/github.com/go-openapi/jsonpointer/go.sum create mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go create mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore create mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml create mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE create mode 100644 
vendor/github.com/go-openapi/jsonreference/README.md create mode 100644 vendor/github.com/go-openapi/jsonreference/go.mod create mode 100644 vendor/github.com/go-openapi/jsonreference/go.sum create mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go create mode 100644 vendor/github.com/go-openapi/spec/.editorconfig create mode 100644 vendor/github.com/go-openapi/spec/.gitignore create mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml create mode 100644 vendor/github.com/go-openapi/spec/.travis.yml create mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/spec/LICENSE create mode 100644 vendor/github.com/go-openapi/spec/README.md create mode 100644 vendor/github.com/go-openapi/spec/bindata.go create mode 100644 vendor/github.com/go-openapi/spec/cache.go create mode 100644 vendor/github.com/go-openapi/spec/contact_info.go create mode 100644 vendor/github.com/go-openapi/spec/debug.go create mode 100644 vendor/github.com/go-openapi/spec/expander.go create mode 100644 vendor/github.com/go-openapi/spec/external_docs.go create mode 100644 vendor/github.com/go-openapi/spec/go.mod create mode 100644 vendor/github.com/go-openapi/spec/go.sum create mode 100644 vendor/github.com/go-openapi/spec/header.go create mode 100644 vendor/github.com/go-openapi/spec/info.go create mode 100644 vendor/github.com/go-openapi/spec/items.go create mode 100644 vendor/github.com/go-openapi/spec/license.go create mode 100644 vendor/github.com/go-openapi/spec/normalizer.go create mode 100644 vendor/github.com/go-openapi/spec/operation.go create mode 100644 vendor/github.com/go-openapi/spec/parameter.go create mode 100644 vendor/github.com/go-openapi/spec/path_item.go create mode 100644 vendor/github.com/go-openapi/spec/paths.go create mode 100644 vendor/github.com/go-openapi/spec/ref.go create mode 100644 vendor/github.com/go-openapi/spec/response.go create mode 100644 vendor/github.com/go-openapi/spec/responses.go create mode 100644 vendor/github.com/go-openapi/spec/schema.go create mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go create mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go create mode 100644 vendor/github.com/go-openapi/spec/spec.go create mode 100644 vendor/github.com/go-openapi/spec/swagger.go create mode 100644 vendor/github.com/go-openapi/spec/tag.go create mode 100644 vendor/github.com/go-openapi/spec/unused.go create mode 100644 vendor/github.com/go-openapi/spec/xml_object.go create mode 100644 vendor/github.com/go-openapi/swag/.editorconfig create mode 100644 vendor/github.com/go-openapi/swag/.gitignore create mode 100644 vendor/github.com/go-openapi/swag/.golangci.yml create mode 100644 vendor/github.com/go-openapi/swag/.travis.yml create mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/swag/LICENSE create mode 100644 vendor/github.com/go-openapi/swag/README.md create mode 100644 vendor/github.com/go-openapi/swag/convert.go create mode 100644 vendor/github.com/go-openapi/swag/convert_types.go create mode 100644 vendor/github.com/go-openapi/swag/doc.go create mode 100644 vendor/github.com/go-openapi/swag/go.mod create mode 100644 vendor/github.com/go-openapi/swag/go.sum create mode 100644 vendor/github.com/go-openapi/swag/json.go create mode 100644 vendor/github.com/go-openapi/swag/loading.go create mode 100644 vendor/github.com/go-openapi/swag/name_lexem.go create mode 100644 vendor/github.com/go-openapi/swag/net.go 
create mode 100644 vendor/github.com/go-openapi/swag/path.go create mode 100644 vendor/github.com/go-openapi/swag/post_go18.go create mode 100644 vendor/github.com/go-openapi/swag/post_go19.go create mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go create mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go create mode 100644 vendor/github.com/go-openapi/swag/split.go create mode 100644 vendor/github.com/go-openapi/swag/util.go create mode 100644 vendor/github.com/go-openapi/swag/yaml.go create mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go create mode 100644 vendor/github.com/json-iterator/go/go.mod create mode 100644 vendor/github.com/json-iterator/go/go.sum create mode 100644 vendor/github.com/mailru/easyjson/LICENSE create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go create mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go delete mode 100644 vendor/github.com/pborman/uuid/.travis.yml delete mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/pborman/uuid/README.md delete mode 100644 vendor/github.com/pborman/uuid/dce.go delete mode 100644 vendor/github.com/pborman/uuid/doc.go delete mode 100644 vendor/github.com/pborman/uuid/go.mod delete mode 100644 vendor/github.com/pborman/uuid/go.sum delete mode 100644 vendor/github.com/pborman/uuid/hash.go delete mode 100644 vendor/github.com/pborman/uuid/marshal.go delete mode 100644 vendor/github.com/pborman/uuid/node.go delete mode 100644 
vendor/github.com/pborman/uuid/sql.go delete mode 100644 vendor/github.com/pborman/uuid/time.go delete mode 100644 vendor/github.com/pborman/uuid/util.go delete mode 100644 vendor/github.com/pborman/uuid/uuid.go delete mode 100644 vendor/github.com/pborman/uuid/version1.go delete mode 100644 vendor/github.com/pborman/uuid/version4.go create mode 100644 vendor/github.com/spf13/pflag/float32_slice.go create mode 100644 vendor/github.com/spf13/pflag/float64_slice.go create mode 100644 vendor/github.com/spf13/pflag/go.mod create mode 100644 vendor/github.com/spf13/pflag/go.sum create mode 100644 vendor/github.com/spf13/pflag/int32_slice.go create mode 100644 vendor/github.com/spf13/pflag/int64_slice.go create mode 100644 vendor/github.com/spf13/pflag/string_to_int64.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 vendor/golang.org/x/text/width/gen.go create mode 100644 vendor/golang.org/x/text/width/gen_common.go create mode 100644 vendor/golang.org/x/text/width/gen_trieval.go create mode 100644 vendor/golang.org/x/text/width/kind_string.go create mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/width/transform.go create mode 100644 vendor/golang.org/x/text/width/trieval.go create mode 100644 vendor/golang.org/x/text/width/width.go create mode 100644 vendor/golang.org/x/tools/AUTHORS create mode 100644 vendor/golang.org/x/tools/CONTRIBUTORS create mode 100644 vendor/golang.org/x/tools/LICENSE create mode 100644 vendor/golang.org/x/tools/PATENTS create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/main.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 vendor/golang.org/x/tools/go/packages/external.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 
vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 vendor/golang.org/x/tools/imports/forward.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go create mode 100644 vendor/golang.org/x/tools/internal/imports/fix.go create mode 100644 vendor/golang.org/x/tools/internal/imports/imports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mkindex.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mkstdlib.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod_cache.go create mode 100644 vendor/golang.org/x/tools/internal/imports/sortimports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/zstdlib.go create mode 100644 vendor/golang.org/x/tools/internal/module/module.go create mode 100644 vendor/golang.org/x/tools/internal/semver/semver.go create mode 100644 vendor/gonum.org/v1/gonum/AUTHORS create mode 100644 vendor/gonum.org/v1/gonum/CONTRIBUTORS create mode 100644 vendor/gonum.org/v1/gonum/LICENSE create mode 100644 vendor/gonum.org/v1/gonum/blas/README.md create mode 100644 vendor/gonum.org/v1/gonum/blas/blas.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/blas64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/conv.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/conversions.bash create mode 100644 vendor/gonum.org/v1/gonum/blas/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/errors.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/gemv.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/gonum.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go 
create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash create mode 100644 vendor/gonum.org/v1/gonum/floats/README.md create mode 100644 vendor/gonum.org/v1/gonum/floats/doc.go create mode 100644 vendor/gonum.org/v1/gonum/floats/floats.go create mode 100644 vendor/gonum.org/v1/gonum/graph/.gitignore create mode 100644 vendor/gonum.org/v1/gonum/graph/README.md create mode 100644 vendor/gonum.org/v1/gonum/graph/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/graph.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/same.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/set.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/edges.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/lines.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/nodes.go create mode 100644 vendor/gonum.org/v1/gonum/graph/multigraph.go create mode 100644 vendor/gonum.org/v1/gonum/graph/nodes_edges.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/directed.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/simple.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/undirected.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/tarjan.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/topo.go create mode 100644 vendor/gonum.org/v1/gonum/graph/traverse/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/traverse/traverse.go create mode 100644 
vendor/gonum.org/v1/gonum/graph/undirect.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go create mode 100644 
vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/math.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/signbit.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/lapack/.gitignore create mode 100644 vendor/gonum.org/v1/gonum/lapack/README.md create mode 100644 vendor/gonum.org/v1/gonum/lapack/doc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go create mode 100644 
vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go create mode 100644 
vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/doc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go create mode 100644 
vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/errors.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go create mode 100644 vendor/gonum.org/v1/gonum/mat/README.md create mode 100644 vendor/gonum.org/v1/gonum/mat/band.go create mode 100644 vendor/gonum.org/v1/gonum/mat/cdense.go create mode 100644 vendor/gonum.org/v1/gonum/mat/cholesky.go create mode 100644 vendor/gonum.org/v1/gonum/mat/cmatrix.go create mode 100644 vendor/gonum.org/v1/gonum/mat/consts.go create mode 100644 vendor/gonum.org/v1/gonum/mat/dense.go create mode 100644 vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go create mode 100644 vendor/gonum.org/v1/gonum/mat/diagonal.go create mode 100644 vendor/gonum.org/v1/gonum/mat/doc.go create mode 100644 vendor/gonum.org/v1/gonum/mat/eigen.go create mode 100644 vendor/gonum.org/v1/gonum/mat/errors.go create mode 100644 vendor/gonum.org/v1/gonum/mat/format.go create mode 100644 vendor/gonum.org/v1/gonum/mat/gsvd.go create mode 100644 vendor/gonum.org/v1/gonum/mat/hogsvd.go create mode 100644 vendor/gonum.org/v1/gonum/mat/index_bound_checks.go create mode 100644 vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go create mode 100644 vendor/gonum.org/v1/gonum/mat/inner.go create mode 100644 vendor/gonum.org/v1/gonum/mat/io.go create mode 100644 vendor/gonum.org/v1/gonum/mat/lq.go create mode 100644 vendor/gonum.org/v1/gonum/mat/lu.go create mode 100644 vendor/gonum.org/v1/gonum/mat/matrix.go create mode 100644 vendor/gonum.org/v1/gonum/mat/offset.go create mode 100644 vendor/gonum.org/v1/gonum/mat/offset_appengine.go create mode 100644 vendor/gonum.org/v1/gonum/mat/pool.go create mode 100644 vendor/gonum.org/v1/gonum/mat/product.go create mode 100644 vendor/gonum.org/v1/gonum/mat/qr.go create mode 100644 vendor/gonum.org/v1/gonum/mat/shadow.go create mode 100644 vendor/gonum.org/v1/gonum/mat/solve.go create mode 100644 vendor/gonum.org/v1/gonum/mat/svd.go create mode 100644 vendor/gonum.org/v1/gonum/mat/symband.go create mode 100644 vendor/gonum.org/v1/gonum/mat/symmetric.go create mode 100644 vendor/gonum.org/v1/gonum/mat/triangular.go create mode 100644 vendor/gonum.org/v1/gonum/mat/triband.go create mode 100644 vendor/gonum.org/v1/gonum/mat/vector.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go create mode 100644 
vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/discovery/interface.go create mode 100644 vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go create mode 100644 
vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go create mode 100644 vendor/k8s.io/code-generator/CONTRIBUTING.md create mode 100644 vendor/k8s.io/code-generator/LICENSE create mode 100644 vendor/k8s.io/code-generator/OWNERS create mode 100644 vendor/k8s.io/code-generator/README.md create mode 100644 vendor/k8s.io/code-generator/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/README.md create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go create mode 100644 
vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/path/path.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/types/types.go create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go create mode 100644 vendor/k8s.io/code-generator/cmd/import-boss/.gitignore create mode 100644 vendor/k8s.io/code-generator/cmd/import-boss/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go create mode 100644 
vendor/k8s.io/code-generator/cmd/register-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/set-gen/.gitignore create mode 100644 vendor/k8s.io/code-generator/cmd/set-gen/main.go create mode 100644 vendor/k8s.io/code-generator/code-of-conduct.md create mode 100644 vendor/k8s.io/code-generator/generate-groups.sh create mode 100644 vendor/k8s.io/code-generator/generate-internal-groups.sh create mode 100644 vendor/k8s.io/code-generator/go.mod create mode 100644 vendor/k8s.io/code-generator/go.sum create mode 100644 vendor/k8s.io/code-generator/pkg/namer/tag-override.go create mode 100644 vendor/k8s.io/code-generator/pkg/util/build.go create mode 100644 vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go create mode 100644 vendor/k8s.io/code-generator/tools.go create mode 100644 vendor/k8s.io/gengo/LICENSE create mode 100644 vendor/k8s.io/gengo/args/args.go create mode 100644 vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go create mode 100644 vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go create mode 100644 vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/generators/sets.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/generators/tags.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/byte.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/doc.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/empty.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/int.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/int64.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/string.go create mode 100644 vendor/k8s.io/gengo/generator/default_generator.go create mode 100644 vendor/k8s.io/gengo/generator/default_package.go create mode 100644 vendor/k8s.io/gengo/generator/doc.go create mode 100644 vendor/k8s.io/gengo/generator/error_tracker.go create mode 100644 vendor/k8s.io/gengo/generator/execute.go create mode 100644 vendor/k8s.io/gengo/generator/generator.go create mode 100644 vendor/k8s.io/gengo/generator/import_tracker.go create mode 100644 vendor/k8s.io/gengo/generator/snippet_writer.go create mode 100644 vendor/k8s.io/gengo/namer/doc.go create mode 100644 vendor/k8s.io/gengo/namer/import_tracker.go create mode 100644 vendor/k8s.io/gengo/namer/namer.go create mode 100644 vendor/k8s.io/gengo/namer/order.go create mode 100644 vendor/k8s.io/gengo/namer/plural_namer.go create mode 100644 vendor/k8s.io/gengo/parser/doc.go create mode 100644 vendor/k8s.io/gengo/parser/parse.go create mode 100644 vendor/k8s.io/gengo/types/comments.go create mode 100644 vendor/k8s.io/gengo/types/doc.go create mode 100644 vendor/k8s.io/gengo/types/flatten.go create mode 100644 vendor/k8s.io/gengo/types/types.go create mode 100644 vendor/k8s.io/klog/go.mod create mode 100644 vendor/k8s.io/klog/go.sum create mode 100644 vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/common.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/README.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/config.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/extension.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/openapi.go create mode 100644 
vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/union.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/string.go
 create mode 100644 vendor/k8s.io/utils/pointer/OWNERS
 create mode 100644 vendor/k8s.io/utils/pointer/pointer.go
 create mode 100644 vendor/sigs.k8s.io/yaml/go.mod
 create mode 100644 vendor/sigs.k8s.io/yaml/go.sum

diff --git a/pkg/apis/frameworkcontroller/v1/zz_generated.deepcopy.go b/pkg/apis/frameworkcontroller/v1/zz_generated.deepcopy.go
index 0034c4ed..70e8f83a 100644
--- a/pkg/apis/frameworkcontroller/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/frameworkcontroller/v1/zz_generated.deepcopy.go
@@ -1,20 +1,26 @@
 // +build !ignore_autogenerated
 
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by deepcopy-gen. DO NOT EDIT.
@@ -387,7 +393,7 @@ func (in *FrameworkAttemptStatus) DeepCopy() *FrameworkAttemptStatus {
 func (in *FrameworkList) DeepCopyInto(out *FrameworkList) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
-	out.ListMeta = in.ListMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
 		*out = make([]Framework, len(*in))

diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go
index 479bb2d3..77bfccd8 100644
--- a/pkg/client/clientset/versioned/clientset.go
+++ b/pkg/client/clientset/versioned/clientset.go
@@ -1,24 +1,32 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by client-gen. DO NOT EDIT.
 
 package versioned
 
 import (
+	"fmt"
+
 	frameworkcontrollerv1 "github.com/microsoft/frameworkcontroller/pkg/client/clientset/versioned/typed/frameworkcontroller/v1"
 	discovery "k8s.io/client-go/discovery"
 	rest "k8s.io/client-go/rest"
@@ -51,9 +59,14 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
 }
 
 // NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
 func NewForConfig(c *rest.Config) (*Clientset, error) {
 	configShallowCopy := *c
 	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+		if configShallowCopy.Burst <= 0 {
+			return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+		}
 		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
 	}
 	var cs Clientset

diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go
index 41721ca5..4f612bfd 100644
--- a/pkg/client/clientset/versioned/doc.go
+++ b/pkg/client/clientset/versioned/doc.go
@@ -1,18 +1,24 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by client-gen. DO NOT EDIT.

diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go
index 8a35f641..95a29911 100644
--- a/pkg/client/clientset/versioned/fake/clientset_generated.go
+++ b/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -1,18 +1,24 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by client-gen. DO NOT EDIT.
 
@@ -41,7 +47,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
 		}
 	}
 
-	cs := &Clientset{}
+	cs := &Clientset{tracker: o}
 	cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
 	cs.AddReactor("*", "*", testing.ObjectReaction(o))
 	cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
@@ -63,12 +69,17 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
 type Clientset struct {
 	testing.Fake
 	discovery *fakediscovery.FakeDiscovery
+	tracker   testing.ObjectTracker
 }
 
 func (c *Clientset) Discovery() discovery.DiscoveryInterface {
 	return c.discovery
 }
 
+func (c *Clientset) Tracker() testing.ObjectTracker {
+	return c.tracker
+}
+
 var _ clientset.Interface = &Clientset{}
 
 // FrameworkcontrollerV1 retrieves the FrameworkcontrollerV1Client

diff --git a/pkg/client/clientset/versioned/fake/doc.go b/pkg/client/clientset/versioned/fake/doc.go
index 9b99e716..94664b9c 100644
--- a/pkg/client/clientset/versioned/fake/doc.go
+++ b/pkg/client/clientset/versioned/fake/doc.go
@@ -1,18 +1,24 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by client-gen. DO NOT EDIT.

diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go
index f46e251f..69d23ad3 100644
--- a/pkg/client/clientset/versioned/fake/register.go
+++ b/pkg/client/clientset/versioned/fake/register.go
@@ -1,18 +1,24 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by client-gen. DO NOT EDIT.

diff --git a/pkg/client/clientset/versioned/scheme/doc.go b/pkg/client/clientset/versioned/scheme/doc.go
index 7dc37561..09463dd1 100644
--- a/pkg/client/clientset/versioned/scheme/doc.go
+++ b/pkg/client/clientset/versioned/scheme/doc.go
@@ -1,18 +1,24 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by client-gen. DO NOT EDIT.

diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go
index 310a5978..6ec2985b 100644
--- a/pkg/client/clientset/versioned/scheme/register.go
+++ b/pkg/client/clientset/versioned/scheme/register.go
@@ -1,18 +1,24 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
 
 // Code generated by client-gen. DO NOT EDIT.
diff --git a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/doc.go b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/doc.go index 3af5d054..f60c7d93 100644 --- a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/doc.go +++ b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/doc.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by client-gen. DO NOT EDIT. diff --git a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/doc.go b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/doc.go index 16f44399..77f84e63 100644 --- a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/doc.go +++ b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/doc.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by client-gen. DO NOT EDIT. diff --git a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_framework.go b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_framework.go index 6e012c15..c7b3953b 100644 --- a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_framework.go +++ b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_framework.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by client-gen. DO NOT EDIT. 
diff --git a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_frameworkcontroller_client.go b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_frameworkcontroller_client.go index d2220212..7401e10d 100644 --- a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_frameworkcontroller_client.go +++ b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/fake/fake_frameworkcontroller_client.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by client-gen. DO NOT EDIT. diff --git a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/framework.go b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/framework.go index 9b9e4ba7..a26c5df4 100644 --- a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/framework.go +++ b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/framework.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by client-gen. DO NOT EDIT. diff --git a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/frameworkcontroller_client.go b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/frameworkcontroller_client.go index 358dae95..6b239c14 100644 --- a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/frameworkcontroller_client.go +++ b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/frameworkcontroller_client.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by client-gen. DO NOT EDIT. 
@@ -21,7 +27,6 @@ package v1 import ( v1 "github.com/microsoft/frameworkcontroller/pkg/apis/frameworkcontroller/v1" "github.com/microsoft/frameworkcontroller/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) @@ -71,7 +76,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/generated_expansion.go index 6e3bcf6e..0efb15a9 100644 --- a/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/frameworkcontroller/v1/generated_expansion.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by client-gen. DO NOT EDIT. diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index 8bb1cc65..35772d6b 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by informer-gen. DO NOT EDIT. diff --git a/pkg/client/informers/externalversions/frameworkcontroller/interface.go b/pkg/client/informers/externalversions/frameworkcontroller/interface.go index eb4572c0..6cd53f27 100644 --- a/pkg/client/informers/externalversions/frameworkcontroller/interface.go +++ b/pkg/client/informers/externalversions/frameworkcontroller/interface.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by informer-gen. DO NOT EDIT. diff --git a/pkg/client/informers/externalversions/frameworkcontroller/v1/framework.go b/pkg/client/informers/externalversions/frameworkcontroller/v1/framework.go index 69cac17d..abff0bdf 100644 --- a/pkg/client/informers/externalversions/frameworkcontroller/v1/framework.go +++ b/pkg/client/informers/externalversions/frameworkcontroller/v1/framework.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by informer-gen. DO NOT EDIT. diff --git a/pkg/client/informers/externalversions/frameworkcontroller/v1/interface.go b/pkg/client/informers/externalversions/frameworkcontroller/v1/interface.go index 064b0a8b..f011db22 100644 --- a/pkg/client/informers/externalversions/frameworkcontroller/v1/interface.go +++ b/pkg/client/informers/externalversions/frameworkcontroller/v1/interface.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by informer-gen. DO NOT EDIT. diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 984a8dfe..47efb9f3 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by informer-gen. DO NOT EDIT. 
diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index c638cd49..75d478a9 100644 --- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by informer-gen. DO NOT EDIT. diff --git a/pkg/client/listers/frameworkcontroller/v1/expansion_generated.go b/pkg/client/listers/frameworkcontroller/v1/expansion_generated.go index 40185adf..aeb988cd 100644 --- a/pkg/client/listers/frameworkcontroller/v1/expansion_generated.go +++ b/pkg/client/listers/frameworkcontroller/v1/expansion_generated.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by lister-gen. DO NOT EDIT. diff --git a/pkg/client/listers/frameworkcontroller/v1/framework.go b/pkg/client/listers/frameworkcontroller/v1/framework.go index 33712363..dfb3ed5f 100644 --- a/pkg/client/listers/frameworkcontroller/v1/framework.go +++ b/pkg/client/listers/frameworkcontroller/v1/framework.go @@ -1,18 +1,24 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// MIT License +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE // Code generated by lister-gen. DO NOT EDIT. 
diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore new file mode 100644 index 00000000..748e4c80 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.gitignore @@ -0,0 +1,5 @@ +*.sublime-* +.DS_Store +*.swp +*.swo +tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml new file mode 100644 index 00000000..cf31e6af --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - "1.10.x" + - "1.11.x" + - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE new file mode 100644 index 00000000..4b9986de --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md new file mode 100644 index 00000000..07de0c49 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -0,0 +1,188 @@ +# Purell + +Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... + +Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. + +[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) + +## Install + +`go get github.com/PuerkitoBio/purell` + +## Changelog + +* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). +* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). +* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). +* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). +* **v0.2.0** : Add benchmarks, Attempt IDN support. +* **v0.1.0** : Initial release. 
+ +## Examples + +From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): + +```go +package purell + +import ( + "fmt" + "net/url" +) + +func ExampleNormalizeURLString() { + if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", + FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { + panic(err) + } else { + fmt.Print(normalized) + } + // Output: http://somewebsite.com:80/Amazing%3F/url/ +} + +func ExampleMustNormalizeURLString() { + normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", + FlagsUnsafeGreedy) + fmt.Print(normalized) + + // Output: http://somewebsite.com/Amazing%FA/url +} + +func ExampleNormalizeURL() { + if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { + panic(err) + } else { + normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) + fmt.Print(normalized) + } + + // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 +} +``` + +## API + +As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: + +```go +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". + + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) +``` + +For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. + +The [full godoc reference is available on gopkgdoc][godoc]. 
+ +Some things to note: + +* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. + +* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): + - %24 -> $ + - %26 -> & + - %2B-%3B -> +,-./0123456789:; + - %3D -> = + - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ + - %5F -> _ + - %61-%7A -> abcdefghijklmnopqrstuvwxyz + - %7E -> ~ + + +* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). + +* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. + +* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. + +### Safe vs Usually Safe vs Unsafe + +Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. + +Consider the following URL: + +`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +Normalizing with the `FlagsSafe` gives: + +`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +With the `FlagsUsuallySafeGreedy`: + +`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` + +And with `FlagsUnsafeGreedy`: + +`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` + +## TODOs + +* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. + +## Thanks / Contributions + +@rogpeppe +@jehiah +@opennota +@pchristopher1275 +@zenovich +@beeker1121 + +## License + +The [BSD 3-Clause license][bsd]. + +[bsd]: http://opensource.org/licenses/BSD-3-Clause +[wiki]: http://en.wikipedia.org/wiki/URL_normalization +[rfc]: http://tools.ietf.org/html/rfc3986#section-6 +[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell +[pr5]: https://github.com/PuerkitoBio/purell/pull/5 +[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go new file mode 100644 index 00000000..6d0fc190 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -0,0 +1,379 @@ +/* +Package purell offers URL normalization as described on the wikipedia page: +http://en.wikipedia.org/wiki/URL_normalization +*/ +package purell + +import ( + "bytes" + "fmt" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/PuerkitoBio/urlesc" + "golang.org/x/net/idna" + "golang.org/x/text/unicode/norm" + "golang.org/x/text/width" +) + +// A set of normalization flags determines how a URL will +// be normalized. 
+type NormalizationFlags uint + +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
+ + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) +var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) +var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) +var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) +var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) +var rxEmptyPort = regexp.MustCompile(`:+$`) + +// Map of flags to implementation function. +// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically +// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. + +// Since maps have undefined traversing order, make a slice of ordered keys +var flagsOrder = []NormalizationFlags{ + FlagLowercaseScheme, + FlagLowercaseHost, + FlagRemoveDefaultPort, + FlagRemoveDirectoryIndex, + FlagRemoveDotSegments, + FlagRemoveFragment, + FlagForceHTTP, // Must be after remove default port (because https=443/http=80) + FlagRemoveDuplicateSlashes, + FlagRemoveWWW, + FlagAddWWW, + FlagSortQuery, + FlagDecodeDWORDHost, + FlagDecodeOctalHost, + FlagDecodeHexHost, + FlagRemoveUnnecessaryHostDots, + FlagRemoveEmptyPortSeparator, + FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last + FlagAddTrailingSlash, +} + +// ... 
and then the map, where order is unimportant +var flags = map[NormalizationFlags]func(*url.URL){ + FlagLowercaseScheme: lowercaseScheme, + FlagLowercaseHost: lowercaseHost, + FlagRemoveDefaultPort: removeDefaultPort, + FlagRemoveDirectoryIndex: removeDirectoryIndex, + FlagRemoveDotSegments: removeDotSegments, + FlagRemoveFragment: removeFragment, + FlagForceHTTP: forceHTTP, + FlagRemoveDuplicateSlashes: removeDuplicateSlashes, + FlagRemoveWWW: removeWWW, + FlagAddWWW: addWWW, + FlagSortQuery: sortQuery, + FlagDecodeDWORDHost: decodeDWORDHost, + FlagDecodeOctalHost: decodeOctalHost, + FlagDecodeHexHost: decodeHexHost, + FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, + FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, + FlagRemoveTrailingSlash: removeTrailingSlash, + FlagAddTrailingSlash: addTrailingSlash, +} + +// MustNormalizeURLString returns the normalized string, and panics if an error occurs. +// It takes an URL string as input, as well as the normalization flags. +func MustNormalizeURLString(u string, f NormalizationFlags) string { + result, e := NormalizeURLString(u, f) + if e != nil { + panic(e) + } + return result +} + +// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. +// It takes an URL string as input, as well as the normalization flags. +func NormalizeURLString(u string, f NormalizationFlags) (string, error) { + parsed, err := url.Parse(u) + if err != nil { + return "", err + } + + if f&FlagLowercaseHost == FlagLowercaseHost { + parsed.Host = strings.ToLower(parsed.Host) + } + + // The idna package doesn't fully conform to RFC 5895 + // (https://tools.ietf.org/html/rfc5895), so we do it here. + // Taken from Go 1.8 cycle source, courtesy of bradfitz. + // TODO: Remove when (if?) idna package conforms to RFC 5895. + parsed.Host = width.Fold.String(parsed.Host) + parsed.Host = norm.NFC.String(parsed.Host) + if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { + return "", err + } + + return NormalizeURL(parsed, f), nil +} + +// NormalizeURL returns the normalized string. +// It takes a parsed URL object as input, as well as the normalization flags. +func NormalizeURL(u *url.URL, f NormalizationFlags) string { + for _, k := range flagsOrder { + if f&k == k { + flags[k](u) + } + } + return urlesc.Escape(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if strings.HasSuffix(u.Path, "/") { + u.Path = u.Path[:l-1] + } + } else if l = len(u.Host); l > 0 { + if strings.HasSuffix(u.Host, "/") { + u.Host = u.Host[:l-1] + } + } +} + +func addTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } else if l = len(u.Host); l > 0 { + if !strings.HasSuffix(u.Host, "/") { + u.Host += "/" + } + } +} + +func removeDotSegments(u *url.URL) { + if len(u.Path) > 0 { + var dotFree []string + var lastIsDot bool + + sections := strings.Split(u.Path, "/") + for _, s := range sections { + if s == ".." 
{ + if len(dotFree) > 0 { + dotFree = dotFree[:len(dotFree)-1] + } + } else if s != "." { + dotFree = append(dotFree, s) + } + lastIsDot = (s == "." || s == "..") + } + // Special case if host does not end with / and new path does not begin with / + u.Path = strings.Join(dotFree, "/") + if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + // Special case if the last segment was a dot, make sure the path ends with a slash + if lastIsDot && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } +} + +func removeDirectoryIndex(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") + } +} + +func removeFragment(u *url.URL) { + u.Fragment = "" +} + +func forceHTTP(u *url.URL) { + if strings.ToLower(u.Scheme) == "https" { + u.Scheme = "http" + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} + +func removeWWW(u *url.URL) { + if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = u.Host[4:] + } +} + +func addWWW(u *url.URL) { + if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = "www." + u.Host + } +} + +func sortQuery(u *url.URL) { + q := u.Query() + + if len(q) > 0 { + arKeys := make([]string, len(q)) + i := 0 + for k := range q { + arKeys[i] = k + i++ + } + sort.Strings(arKeys) + buf := new(bytes.Buffer) + for _, k := range arKeys { + sort.Strings(q[k]) + for _, v := range q[k] { + if buf.Len() > 0 { + buf.WriteRune('&') + } + buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) + } + } + + // Rebuild the raw query string + u.RawQuery = buf.String() + } +} + +func decodeDWORDHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { + var parts [4]int64 + + dword, _ := strconv.ParseInt(matches[1], 10, 0) + for i, shift := range []uint{24, 16, 8, 0} { + parts[i] = dword >> shift & 0xFF + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) + } + } +} + +func decodeOctalHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { + var parts [4]int64 + + for i := 1; i <= 4; i++ { + parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) + } + } +} + +func decodeHexHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { + // Conversion is safe because of regex validation + parsed, _ := strconv.ParseInt(matches[1], 16, 0) + // Set host as DWORD (base 10) encoded host + u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) + // The rest is the same as decoding a DWORD host + decodeDWORDHost(u) + } + } +} + +func removeUnncessaryHostDots(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { + // Trim the leading and trailing dots + u.Host = strings.Trim(matches[1], ".") + if len(matches) > 2 { + u.Host += matches[2] + } + } + } +} + +func removeEmptyPortSeparator(u *url.URL) { + if len(u.Host) > 0 { + u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") + } +} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml new file mode 100644 index 00000000..ba6b225f --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml @@ -0,0 
+1,15 @@ +language: go + +go: + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - tip + +install: + - go build . + +script: + - go test -v diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE similarity index 96% rename from vendor/github.com/pborman/uuid/LICENSE rename to vendor/github.com/PuerkitoBio/urlesc/LICENSE index 5dc68268..74487567 100644 --- a/vendor/github.com/pborman/uuid/LICENSE +++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md new file mode 100644 index 00000000..57aff0a5 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/README.md @@ -0,0 +1,16 @@ +urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) +====== + +Package urlesc implements query escaping as per RFC 3986. + +It contains some parts of the net/url package, modified so as to allow +some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). + +## Install + + go get github.com/PuerkitoBio/urlesc + +## License + +Go license (BSD-3-Clause) + diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go new file mode 100644 index 00000000..1b846245 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go @@ -0,0 +1,180 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package urlesc implements query escaping as per RFC 3986. +// It contains some parts of the net/url package, modified so as to allow +// some reserved characters incorrectly escaped by net/url. +// See https://github.com/golang/go/issues/5684 +package urlesc + +import ( + "bytes" + "net/url" + "strings" +) + +type encoding int + +const ( + encodePath encoding = 1 + iota + encodeUserPassword + encodeQueryComponent + encodeFragment +) + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. +func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { + return false + } + + switch c { + case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) + return false + + // §2.2 Reserved characters (reserved) + case ':', '/', '?', '#', '[', ']', '@', // gen-delims + '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims + // Different sections of the URL allow a few of + // the reserved characters to appear unescaped. + switch mode { + case encodePath: // §3.3 + // The RFC allows sub-delims and : @. + // '/', '[' and ']' can be used to assign meaning to individual path + // segments. This package only manipulates the path as a whole, + // so we allow those as well. That leaves only ? and # to escape. + return c == '?' || c == '#' + + case encodeUserPassword: // §3.2.1 + // The RFC allows : and sub-delims in + // userinfo. 
The parsing of userinfo treats ':' as special so we must escape + // all the gen-delims. + return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' + + case encodeQueryComponent: // §3.4 + // The RFC allows / and ?. + return c != '/' && c != '?' + + case encodeFragment: // §4.1 + // The RFC text is silent but the grammar allows + // everything, so escape nothing but # + return c == '#' + } + } + + // Everything else must be escaped. + return true +} + +// QueryEscape escapes the string so it can be safely placed +// inside a URL query. +func QueryEscape(s string) string { + return escape(s, encodeQueryComponent) +} + +func escape(s string, mode encoding) string { + spaceCount, hexCount := 0, 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c, mode) { + if c == ' ' && mode == encodeQueryComponent { + spaceCount++ + } else { + hexCount++ + } + } + } + + if spaceCount == 0 && hexCount == 0 { + return s + } + + t := make([]byte, len(s)+2*hexCount) + j := 0 + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == ' ' && mode == encodeQueryComponent: + t[j] = '+' + j++ + case shouldEscape(c, mode): + t[j] = '%' + t[j+1] = "0123456789ABCDEF"[c>>4] + t[j+2] = "0123456789ABCDEF"[c&15] + j += 3 + default: + t[j] = s[i] + j++ + } + } + return string(t) +} + +var uiReplacer = strings.NewReplacer( + "%21", "!", + "%27", "'", + "%28", "(", + "%29", ")", + "%2A", "*", +) + +// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. +func unescapeUserinfo(s string) string { + return uiReplacer.Replace(s) +} + +// Escape reassembles the URL into a valid URL string. +// The general form of the result is one of: +// +// scheme:opaque +// scheme://userinfo@host/path?query#fragment +// +// If u.Opaque is non-empty, String uses the first form; +// otherwise it uses the second form. +// +// In the second form, the following rules apply: +// - if u.Scheme is empty, scheme: is omitted. +// - if u.User is nil, userinfo@ is omitted. +// - if u.Host is empty, host/ is omitted. +// - if u.Scheme and u.Host are empty and u.User is nil, +// the entire scheme://userinfo@host/ is omitted. +// - if u.Host is non-empty and u.Path begins with a /, +// the form host/path does not add its own /. +// - if u.RawQuery is empty, ?query is omitted. +// - if u.Fragment is empty, #fragment is omitted. 
+func Escape(u *url.URL) string { + var buf bytes.Buffer + if u.Scheme != "" { + buf.WriteString(u.Scheme) + buf.WriteByte(':') + } + if u.Opaque != "" { + buf.WriteString(u.Opaque) + } else { + if u.Scheme != "" || u.Host != "" || u.User != nil { + buf.WriteString("//") + if ui := u.User; ui != nil { + buf.WriteString(unescapeUserinfo(ui.String())) + buf.WriteByte('@') + } + if h := u.Host; h != "" { + buf.WriteString(h) + } + } + if u.Path != "" && u.Path[0] != '/' && u.Host != "" { + buf.WriteByte('/') + } + buf.WriteString(escape(u.Path, encodePath)) + } + if u.RawQuery != "" { + buf.WriteByte('?') + buf.WriteString(u.RawQuery) + } + if u.Fragment != "" { + buf.WriteByte('#') + buf.WriteString(escape(u.Fragment, encodeFragment)) + } + return buf.String() +} diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore new file mode 100644 index 00000000..cece7be6 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/.gitignore @@ -0,0 +1,70 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +restful.html + +*.out + +tmp.prof + +go-restful.test + +examples/restful-basic-authentication + +examples/restful-encoding-filter + +examples/restful-filters + +examples/restful-hello-world + +examples/restful-resource-functions + +examples/restful-serve-static + +examples/restful-user-service + +*.DS_Store +examples/restful-user-resource + +examples/restful-multi-containers + +examples/restful-form-handling + +examples/restful-CORS-filter + +examples/restful-options-filter + +examples/restful-curly-router + +examples/restful-cpuprofiler-service + +examples/restful-pre-post-filters + +curly.prof + +examples/restful-NCSA-logging + +examples/restful-html-template + +s.html +restful-path-tail diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml new file mode 100644 index 00000000..b22f8f54 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.x + +script: go test -v \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md new file mode 100644 index 00000000..e5252963 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -0,0 +1,273 @@ +## Change history of go-restful + + +v2.9.5 +- fix panic in Response.WriteError if err == nil + +v2.9.4 + +- fix issue #400 , parsing mime type quality +- Route Builder added option for contentEncodingEnabled (#398) + +v2.9.3 + +- Avoid return of 415 Unsupported Media Type when request body is empty (#396) + +v2.9.2 + +- Reduce allocations in per-request methods to improve performance (#395) + +v2.9.1 + +- Fix issue with default responses and invalid status code 0. 
(#393) + +v2.9.0 + +- add per Route content encoding setting (overrides container setting) + +v2.8.0 + +- add Request.QueryParameters() +- add json-iterator (via build tag) +- disable vgo module (until log is moved) + +v2.7.1 + +- add vgo module + +v2.6.1 + +- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) + +v2.6.0 + +- Make JSR 311 routing and path param processing consistent +- Adding description to RouteBuilder.Reads() +- Update example for Swagger12 and OpenAPI + +2017-09-13 + +- added route condition functions using `.If(func)` in route building. + +2017-02-16 + +- solved issue #304, make operation names unique + +2017-01-30 + + [IMPORTANT] For swagger users, change your import statement to: + swagger "github.com/emicklei/go-restful-swagger12" + +- moved swagger 1.2 code to go-restful-swagger12 +- created TAG 2.0.0 + +2017-01-27 + +- remove defer request body close +- expose Dispatch for testing filters and Routefunctions +- swagger response model cannot be array +- created TAG 1.0.0 + +2016-12-22 + +- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) + +2016-11-26 + +- Default change! now use CurlyRouter (was RouterJSR311) +- Default change! no more caching of request content +- Default change! do not recover from panics + +2016-09-22 + +- fix the DefaultRequestContentType feature + +2016-02-14 + +- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response +- add constructors for custom entity accessors for xml and json + +2015-09-27 + +- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency + +2015-09-25 + +- fixed problem with changing Header after WriteHeader (issue 235) + +2015-09-14 + +- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) +- added support for custom EntityReaderWriters. + +2015-08-06 + +- add support for reading entities from compressed request content +- use sync.Pool for compressors of http response and request body +- add Description to Parameter for documentation in Swagger UI + +2015-03-20 + +- add configurable logging + +2015-03-18 + +- if not specified, the Operation is derived from the Route function + +2015-03-17 + +- expose Parameter creation functions +- make trace logger an interface +- fix OPTIONSFilter +- customize rendering of ServiceError +- JSR311 router now handles wildcards +- add Notes to Route + +2014-11-27 + +- (api add) PrettyPrint per response. (as proposed in #167) + +2014-11-12 + +- (api add) ApiVersion(.) for documentation in Swagger UI + +2014-11-10 + +- (api change) struct fields tagged with "description" show up in Swagger UI + +2014-10-31 + +- (api change) ReturnsError -> Returns +- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder +- fix swagger nested structs +- sort Swagger response messages by code + +2014-10-23 + +- (api add) ReturnsError allows you to document Http codes in swagger +- fixed problem with greedy CurlyRouter +- (api add) Access-Control-Max-Age in CORS +- add tracing functionality (injectable) for debugging purposes +- support JSON parse 64bit int +- fix empty parameters for swagger +- WebServicesUrl is now optional for swagger +- fixed duplicate AccessControlAllowOrigin in CORS +- (api change) expose ServeMux in container +- (api add) added AllowedDomains in CORS +- (api add) ParameterNamed for detailed documentation + +2014-04-16 + +- (api add) expose constructor of Request for testing. 
+ +2014-06-27 + +- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). +- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). + +2014-07-03 + +- (api add) CORS can be configured with a list of allowed domains + +2014-03-12 + +- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) + +2014-02-26 + +- (api add) Request now provides information about the matched Route, see method SelectedRoutePath + +2014-02-17 + +- (api change) renamed parameter constants (go-lint checks) + +2014-01-10 + +- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier + +2014-01-07 + +- (api change) Write* methods in Response now return the error or nil. +- added example of serving HTML from a Go template. +- fixed comparing Allowed headers in CORS (is now case-insensitive) + +2013-11-13 + +- (api add) Response knows how many bytes are written to the response body. + +2013-10-29 + +- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. + +2013-10-04 + +- (api add) Response knows what HTTP status has been written +- (api add) Request can have attributes (map of string->interface, also called request-scoped variables + +2013-09-12 + +- (api change) Router interface simplified +- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths + +2013-08-05 + - add OPTIONS support + - add CORS support + +2013-08-27 + +- fixed some reported issues (see github) +- (api change) deprecated use of WriteError; use WriteErrorString instead + +2014-04-15 + +- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString + +2013-08-08 + +- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. +- (api add) the swagger package has be extended to have a UI per container. +- if panic is detected then a small stack trace is printed (thanks to runner-mei) +- (api add) WriteErrorString to Response + +Important API changes: + +- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. +- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. + + +2013-07-06 + +- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. + +2013-06-19 + +- (improve) DoNotRecover option, moved request body closer, improved ReadEntity + +2013-06-03 + +- (api change) removed Dispatcher interface, hide PathExpression +- changed receiver names of type functions to be more idiomatic Go + +2013-06-02 + +- (optimize) Cache the RegExp compilation of Paths. + +2013-05-22 + +- (api add) Added support for request/response filter functions + +2013-05-18 + + +- (api add) Added feature to change the default Http Request Dispatch function (travis cline) +- (api change) Moved Swagger Webservice to swagger package (see example restful-user) + +[2012-11-14 .. 
2013-05-18> + +- See https://github.com/emicklei/go-restful/commits + +2012-11-14 + +- Initial commit + + diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE new file mode 100644 index 00000000..ece7ec61 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012,2013 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile new file mode 100644 index 00000000..b40081cc --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/Makefile @@ -0,0 +1,7 @@ +all: test + +test: + go test -v . + +ex: + cd examples && ls *.go | xargs go build -o /tmp/ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md new file mode 100644 index 00000000..f52c25ac --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/README.md @@ -0,0 +1,88 @@ +go-restful +========== +package for building REST-style Web Services using Google Go + +[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) +[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) +[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful) + +- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) + +REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: + +- GET = Retrieve a representation of a resource +- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. +- PUT = Create if you are sending the full content of the specified resource (URI). +- PUT = Update if you are updating the full content of the specified resource. +- DELETE = Delete if you are requesting the server to delete the resource +- PATCH = Update partial content of a resource +- OPTIONS = Get information about the communication options for the request URI + +### Example + +```Go +ws := new(restful.WebService) +ws. 
+ Path("/users"). + Consumes(restful.MIME_XML, restful.MIME_JSON). + Produces(restful.MIME_JSON, restful.MIME_XML) + +ws.Route(ws.GET("/{user-id}").To(u.findUser). + Doc("get a user"). + Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). + Writes(User{})) +... + +func (u UserResource) findUser(request *restful.Request, response *restful.Response) { + id := request.PathParameter("user-id") + ... +} +``` + +[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) + +### Features + +- Routes for request → function mapping with path parameter (e.g. {id}) support +- Configurable router: + - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*} + - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions +- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) +- Response API for writing structs to JSON/XML and setting headers +- Customizable encoding using EntityReaderWriter registration +- Filters for intercepting the request → response flow on Service or Route level +- Request-scoped variables using attributes +- Containers for WebServices on different HTTP endpoints +- Content encoding (gzip,deflate) of request and response payloads +- Automatic responses on OPTIONS (using a filter) +- Automatic CORS request handling (using a filter) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) +- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) +- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) +- Configurable (trace) logging +- Customizable gzip/deflate readers and writers using CompressorProvider registration + +## How to customize +There are several hooks to customize the behavior of the go-restful package. + +- Router algorithm +- Panic recovery +- JSON decoder +- Trace logging +- Compression +- Encoders for other serializers +- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` + +TODO: write examples of these. + +## Resources + +- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) +- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) +- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) +- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia) +- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) + +Type ```git shortlog -s``` for a full list of contributors. + +© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. 
diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile new file mode 100644 index 00000000..16fd1868 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/Srcfile @@ -0,0 +1 @@ +{"SkipDirs": ["examples"]} diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/bench_test.sh new file mode 100644 index 00000000..47ffbe4a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/bench_test.sh @@ -0,0 +1,10 @@ +#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out + +go test -c +./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany +./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly + +#go tool pprof go-restful.test tmp.prof +go tool pprof go-restful.test curly.prof + + diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go new file mode 100644 index 00000000..220b3771 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compress.go @@ -0,0 +1,123 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bufio" + "compress/gzip" + "compress/zlib" + "errors" + "io" + "net" + "net/http" + "strings" +) + +// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. +var EnableContentEncoding = false + +// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) +type CompressingResponseWriter struct { + writer http.ResponseWriter + compressor io.WriteCloser + encoding string +} + +// Header is part of http.ResponseWriter interface +func (c *CompressingResponseWriter) Header() http.Header { + return c.writer.Header() +} + +// WriteHeader is part of http.ResponseWriter interface +func (c *CompressingResponseWriter) WriteHeader(status int) { + c.writer.WriteHeader(status) +} + +// Write is part of http.ResponseWriter interface +// It is passed through the compressor +func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { + if c.isCompressorClosed() { + return -1, errors.New("Compressing error: tried to write data using closed compressor") + } + return c.compressor.Write(bytes) +} + +// CloseNotify is part of http.CloseNotifier interface +func (c *CompressingResponseWriter) CloseNotify() <-chan bool { + return c.writer.(http.CloseNotifier).CloseNotify() +} + +// Close the underlying compressor +func (c *CompressingResponseWriter) Close() error { + if c.isCompressorClosed() { + return errors.New("Compressing error: tried to close already closed compressor") + } + + c.compressor.Close() + if ENCODING_GZIP == c.encoding { + currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) + } + if ENCODING_DEFLATE == c.encoding { + currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) + } + // gc hint needed? 
+ c.compressor = nil + return nil +} + +func (c *CompressingResponseWriter) isCompressorClosed() bool { + return nil == c.compressor +} + +// Hijack implements the Hijacker interface +// This is especially useful when combining Container.EnabledContentEncoding +// in combination with websockets (for instance gorilla/websocket) +func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := c.writer.(http.Hijacker) + if !ok { + return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") + } + return hijacker.Hijack() +} + +// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. +func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { + header := httpRequest.Header.Get(HEADER_AcceptEncoding) + gi := strings.Index(header, ENCODING_GZIP) + zi := strings.Index(header, ENCODING_DEFLATE) + // use in order of appearance + if gi == -1 { + return zi != -1, ENCODING_DEFLATE + } else if zi == -1 { + return gi != -1, ENCODING_GZIP + } else { + if gi < zi { + return true, ENCODING_GZIP + } + return true, ENCODING_DEFLATE + } +} + +// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate} +func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { + httpWriter.Header().Set(HEADER_ContentEncoding, encoding) + c := new(CompressingResponseWriter) + c.writer = httpWriter + var err error + if ENCODING_GZIP == encoding { + w := currentCompressorProvider.AcquireGzipWriter() + w.Reset(httpWriter) + c.compressor = w + c.encoding = ENCODING_GZIP + } else if ENCODING_DEFLATE == encoding { + w := currentCompressorProvider.AcquireZlibWriter() + w.Reset(httpWriter) + c.compressor = w + c.encoding = ENCODING_DEFLATE + } else { + return nil, errors.New("Unknown encoding:" + encoding) + } + return c, err +} diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go new file mode 100644 index 00000000..ee426010 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compressor_cache.go @@ -0,0 +1,103 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/gzip" + "compress/zlib" +) + +// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount +// of writers and readers (resources). +// If a new resource is acquired and all are in use, it will return a new unmanaged resource. +type BoundedCachedCompressors struct { + gzipWriters chan *gzip.Writer + gzipReaders chan *gzip.Reader + zlibWriters chan *zlib.Writer + writersCapacity int + readersCapacity int +} + +// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors. 
+func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { + b := &BoundedCachedCompressors{ + gzipWriters: make(chan *gzip.Writer, writersCapacity), + gzipReaders: make(chan *gzip.Reader, readersCapacity), + zlibWriters: make(chan *zlib.Writer, writersCapacity), + writersCapacity: writersCapacity, + readersCapacity: readersCapacity, + } + for ix := 0; ix < writersCapacity; ix++ { + b.gzipWriters <- newGzipWriter() + b.zlibWriters <- newZlibWriter() + } + for ix := 0; ix < readersCapacity; ix++ { + b.gzipReaders <- newGzipReader() + } + return b +} + +// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released. +func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { + var writer *gzip.Writer + select { + case writer, _ = <-b.gzipWriters: + default: + // return a new unmanaged one + writer = newGzipWriter() + } + return writer +} + +// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { + // forget the unmanaged ones + if len(b.gzipWriters) < b.writersCapacity { + b.gzipWriters <- w + } +} + +// AcquireGzipReader returns a *gzip.Reader. Needs to be released. +func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { + var reader *gzip.Reader + select { + case reader, _ = <-b.gzipReaders: + default: + // return a new unmanaged one + reader = newGzipReader() + } + return reader +} + +// ReleaseGzipReader accepts a reader (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { + // forget the unmanaged ones + if len(b.gzipReaders) < b.readersCapacity { + b.gzipReaders <- r + } +} + +// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released. +func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { + var writer *zlib.Writer + select { + case writer, _ = <-b.zlibWriters: + default: + // return a new unmanaged one + writer = newZlibWriter() + } + return writer +} + +// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { + // forget the unmanaged ones + if len(b.zlibWriters) < b.writersCapacity { + b.zlibWriters <- w + } +} diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go new file mode 100644 index 00000000..d866ce64 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compressor_pools.go @@ -0,0 +1,91 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "sync" +) + +// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool. +type SyncPoolCompessors struct { + GzipWriterPool *sync.Pool + GzipReaderPool *sync.Pool + ZlibWriterPool *sync.Pool +} + +// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors. 
+func NewSyncPoolCompessors() *SyncPoolCompessors { + return &SyncPoolCompessors{ + GzipWriterPool: &sync.Pool{ + New: func() interface{} { return newGzipWriter() }, + }, + GzipReaderPool: &sync.Pool{ + New: func() interface{} { return newGzipReader() }, + }, + ZlibWriterPool: &sync.Pool{ + New: func() interface{} { return newZlibWriter() }, + }, + } +} + +func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { + return s.GzipWriterPool.Get().(*gzip.Writer) +} + +func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { + s.GzipWriterPool.Put(w) +} + +func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { + return s.GzipReaderPool.Get().(*gzip.Reader) +} + +func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { + s.GzipReaderPool.Put(r) +} + +func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { + return s.ZlibWriterPool.Get().(*zlib.Writer) +} + +func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { + s.ZlibWriterPool.Put(w) +} + +func newGzipWriter() *gzip.Writer { + // create with an empty bytes writer; it will be replaced before using the gzipWriter + writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} + +func newGzipReader() *gzip.Reader { + // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader + // we can safely use currentCompressProvider because it is set on package initialization. + w := currentCompressorProvider.AcquireGzipWriter() + defer currentCompressorProvider.ReleaseGzipWriter(w) + b := new(bytes.Buffer) + w.Reset(b) + w.Flush() + w.Close() + reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) + if err != nil { + panic(err.Error()) + } + return reader +} + +func newZlibWriter() *zlib.Writer { + writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go new file mode 100644 index 00000000..9db4a8c8 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compressors.go @@ -0,0 +1,54 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/gzip" + "compress/zlib" +) + +// CompressorProvider describes a component that can provider compressors for the std methods. +type CompressorProvider interface { + // Returns a *gzip.Writer which needs to be released later. + // Before using it, call Reset(). + AcquireGzipWriter() *gzip.Writer + + // Releases an acquired *gzip.Writer. + ReleaseGzipWriter(w *gzip.Writer) + + // Returns a *gzip.Reader which needs to be released later. + AcquireGzipReader() *gzip.Reader + + // Releases an acquired *gzip.Reader. + ReleaseGzipReader(w *gzip.Reader) + + // Returns a *zlib.Writer which needs to be released later. + // Before using it, call Reset(). + AcquireZlibWriter() *zlib.Writer + + // Releases an acquired *zlib.Writer. + ReleaseZlibWriter(w *zlib.Writer) +} + +// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip). +var currentCompressorProvider CompressorProvider + +func init() { + currentCompressorProvider = NewSyncPoolCompessors() +} + +// CurrentCompressorProvider returns the current CompressorProvider. +// It is initialized using a SyncPoolCompessors. 
+func CurrentCompressorProvider() CompressorProvider { + return currentCompressorProvider +} + +// SetCompressorProvider sets the actual provider of compressors (zlib or gzip). +func SetCompressorProvider(p CompressorProvider) { + if p == nil { + panic("cannot set compressor provider to nil") + } + currentCompressorProvider = p +} diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go new file mode 100644 index 00000000..203439c5 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/constants.go @@ -0,0 +1,30 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +const ( + MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default + + HEADER_Allow = "Allow" + HEADER_Accept = "Accept" + HEADER_Origin = "Origin" + HEADER_ContentType = "Content-Type" + HEADER_LastModified = "Last-Modified" + HEADER_AcceptEncoding = "Accept-Encoding" + HEADER_ContentEncoding = "Content-Encoding" + HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" + HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" + HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" + HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" + HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" + HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" + HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" + HEADER_AccessControlMaxAge = "Access-Control-Max-Age" + + ENCODING_GZIP = "gzip" + ENCODING_DEFLATE = "deflate" +) diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go new file mode 100644 index 00000000..061a8d71 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/container.go @@ -0,0 +1,377 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "os" + "runtime" + "strings" + "sync" + + "github.com/emicklei/go-restful/log" +) + +// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. 
+// The requests are further dispatched to routes of WebServices using a RouteSelector +type Container struct { + webServicesLock sync.RWMutex + webServices []*WebService + ServeMux *http.ServeMux + isRegisteredOnRoot bool + containerFilters []FilterFunction + doNotRecover bool // default is true + recoverHandleFunc RecoverHandleFunction + serviceErrorHandleFunc ServiceErrorHandleFunction + router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative) + contentEncodingEnabled bool // default is false +} + +// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter) +func NewContainer() *Container { + return &Container{ + webServices: []*WebService{}, + ServeMux: http.NewServeMux(), + isRegisteredOnRoot: false, + containerFilters: []FilterFunction{}, + doNotRecover: true, + recoverHandleFunc: logStackOnRecover, + serviceErrorHandleFunc: writeServiceError, + router: CurlyRouter{}, + contentEncodingEnabled: false} +} + +// RecoverHandleFunction declares functions that can be used to handle a panic situation. +// The first argument is what recover() returns. The second must be used to communicate an error response. +type RecoverHandleFunction func(interface{}, http.ResponseWriter) + +// RecoverHandler changes the default function (logStackOnRecover) to be called +// when a panic is detected. DoNotRecover must be have its default value (=false). +func (c *Container) RecoverHandler(handler RecoverHandleFunction) { + c.recoverHandleFunc = handler +} + +// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. +// The first argument is the service error, the second is the request that resulted in the error and +// the third must be used to communicate an error response. +type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) + +// ServiceErrorHandler changes the default function (writeServiceError) to be called +// when a ServiceError is detected. +func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { + c.serviceErrorHandleFunc = handler +} + +// DoNotRecover controls whether panics will be caught to return HTTP 500. +// If set to true, Route functions are responsible for handling any error situation. +// Default value is true. +func (c *Container) DoNotRecover(doNot bool) { + c.doNotRecover = doNot +} + +// Router changes the default Router (currently CurlyRouter) +func (c *Container) Router(aRouter RouteSelector) { + c.router = aRouter +} + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. +func (c *Container) EnableContentEncoding(enabled bool) { + c.contentEncodingEnabled = enabled +} + +// Add a WebService to the Container. It will detect duplicate root paths and exit in that case. 
+func (c *Container) Add(service *WebService) *Container { + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() + + // if rootPath was not set then lazy initialize it + if len(service.rootPath) == 0 { + service.Path("/") + } + + // cannot have duplicate root paths + for _, each := range c.webServices { + if each.RootPath() == service.RootPath() { + log.Printf("WebService with duplicate root path detected:['%v']", each) + os.Exit(1) + } + } + + // If not registered on root then add specific mapping + if !c.isRegisteredOnRoot { + c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) + } + c.webServices = append(c.webServices, service) + return c +} + +// addHandler may set a new HandleFunc for the serveMux +// this function must run inside the critical region protected by the webServicesLock. +// returns true if the function was registered on root ("/") +func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { + pattern := fixedPrefixPath(service.RootPath()) + // check if root path registration is needed + if "/" == pattern || "" == pattern { + serveMux.HandleFunc("/", c.dispatch) + return true + } + // detect if registration already exists + alreadyMapped := false + for _, each := range c.webServices { + if each.RootPath() == service.RootPath() { + alreadyMapped = true + break + } + } + if !alreadyMapped { + serveMux.HandleFunc(pattern, c.dispatch) + if !strings.HasSuffix(pattern, "/") { + serveMux.HandleFunc(pattern+"/", c.dispatch) + } + } + return false +} + +func (c *Container) Remove(ws *WebService) error { + if c.ServeMux == http.DefaultServeMux { + errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) + log.Print(errMsg) + return errors.New(errMsg) + } + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() + // build a new ServeMux and re-register all WebServices + newServeMux := http.NewServeMux() + newServices := []*WebService{} + newIsRegisteredOnRoot := false + for _, each := range c.webServices { + if each.rootPath != ws.rootPath { + // If not registered on root then add specific mapping + if !newIsRegisteredOnRoot { + newIsRegisteredOnRoot = c.addHandler(each, newServeMux) + } + newServices = append(newServices, each) + } + } + c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot + return nil +} + +// logStackOnRecover is the default RecoverHandleFunction and is called +// when DoNotRecover is false and the recoverHandleFunc is not set for the container. +// Default implementation logs the stacktrace and writes the stacktrace on the response. +// This may be a security issue as it exposes sourcecode information. +func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { + var buffer bytes.Buffer + buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) + for i := 2; ; i += 1 { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) + } + log.Print(buffer.String()) + httpWriter.WriteHeader(http.StatusInternalServerError) + httpWriter.Write(buffer.Bytes()) +} + +// writeServiceError is the default ServiceErrorHandleFunction and is called +// when a ServiceError is returned during route selection. 
Default implementation +// calls resp.WriteErrorString(err.Code, err.Message) +func writeServiceError(err ServiceError, req *Request, resp *Response) { + resp.WriteErrorString(err.Code, err.Message) +} + +// Dispatch the incoming Http Request to a matching WebService. +func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + if httpWriter == nil { + panic("httpWriter cannot be nil") + } + if httpRequest == nil { + panic("httpRequest cannot be nil") + } + c.dispatch(httpWriter, httpRequest) +} + +// Dispatch the incoming Http Request to a matching WebService. +func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + writer := httpWriter + + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + // Instal panic recovery unless told otherwise + if !c.doNotRecover { // catch all for 500 response + defer func() { + if r := recover(); r != nil { + c.recoverHandleFunc(r, writer) + return + } + }() + } + + // Find best match Route ; err is non nil if no match was found + var webService *WebService + var route *Route + var err error + func() { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + webService, route, err = c.router.SelectRoute( + c.webServices, + httpRequest) + }() + + // Detect if compression is needed + // assume without compression, test for override + contentEncodingEnabled := c.contentEncodingEnabled + if route != nil && route.contentEncodingEnabled != nil { + contentEncodingEnabled = *route.contentEncodingEnabled + } + if contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + + if err != nil { + // a non-200 response has already been written + // run container filters anyway ; they should not touch the response... + chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { + switch err.(type) { + case ServiceError: + ser := err.(ServiceError) + c.serviceErrorHandleFunc(ser, req, resp) + } + // TODO + }} + chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) + return + } + pathProcessor, routerProcessesPath := c.router.(PathProcessor) + if !routerProcessesPath { + pathProcessor = defaultPathProcessor{} + } + pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) + wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) + // pass through filters (if any) + if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { + // compose filter chain + allFilters := []FilterFunction{} + allFilters = append(allFilters, c.containerFilters...) + allFilters = append(allFilters, webService.filters...) + allFilters = append(allFilters, route.Filters...) 
+ chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { + // handle request by route after passing all filters + route.Function(wrappedRequest, wrappedResponse) + }} + chain.ProcessFilter(wrappedRequest, wrappedResponse) + } else { + // no filters, handle request by route + route.Function(wrappedRequest, wrappedResponse) + } +} + +// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} +func fixedPrefixPath(pathspec string) string { + varBegin := strings.Index(pathspec, "{") + if -1 == varBegin { + return pathspec + } + return pathspec[:varBegin] +} + +// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server +func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { + c.ServeMux.ServeHTTP(httpwriter, httpRequest) +} + +// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. +func (c *Container) Handle(pattern string, handler http.Handler) { + c.ServeMux.Handle(pattern, handler) +} + +// HandleWithFilter registers the handler for the given pattern. +// Container's filter chain is applied for handler. +// If a handler already exists for pattern, HandleWithFilter panics. +func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { + f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { + if len(c.containerFilters) == 0 { + handler.ServeHTTP(httpResponse, httpRequest) + return + } + + chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { + handler.ServeHTTP(httpResponse, httpRequest) + }} + chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) + } + + c.Handle(pattern, http.HandlerFunc(f)) +} + +// Filter appends a container FilterFunction. These are called before dispatching +// a http.Request to a WebService from the container +func (c *Container) Filter(filter FilterFunction) { + c.containerFilters = append(c.containerFilters, filter) +} + +// RegisteredWebServices returns the collections of added WebServices +func (c *Container) RegisteredWebServices() []*WebService { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + result := make([]*WebService, len(c.webServices)) + for ix := range c.webServices { + result[ix] = c.webServices[ix] + } + return result +} + +// computeAllowedMethods returns a list of HTTP methods that are valid for a Request +func (c *Container) computeAllowedMethods(req *Request) []string { + // Go through all RegisteredWebServices() and all its Routes to collect the options + methods := []string{} + requestPath := req.Request.URL.Path + for _, ws := range c.RegisteredWebServices() { + matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) + if matches != nil { + finalMatch := matches[len(matches)-1] + for _, rt := range ws.Routes() { + matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) + if matches != nil { + lastMatch := matches[len(matches)-1] + if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. + methods = append(methods, rt.Method) + } + } + } + } + } + // methods = append(methods, "OPTIONS") not sure about this + return methods +} + +// newBasicRequestResponse creates a pair of Request,Response from its http versions. +// It is basic because no parameter or (produces) content-type information is given. 
+func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { + resp := NewResponse(httpWriter) + resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) + return NewRequest(httpRequest), resp +} diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go new file mode 100644 index 00000000..1efeef07 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/cors_filter.go @@ -0,0 +1,202 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "regexp" + "strconv" + "strings" +) + +// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. +// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page +// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from. +// +// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing +// http://enable-cors.org/server.html +// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request +type CrossOriginResourceSharing struct { + ExposeHeaders []string // list of Header names + AllowedHeaders []string // list of Header names + AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. + AllowedMethods []string + MaxAge int // number of seconds before requiring new Options request + CookiesAllowed bool + Container *Container + + allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. +} + +// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html +// and http://www.html5rocks.com/static/images/cors_server_flowchart.png +func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { + origin := req.Request.Header.Get(HEADER_Origin) + if len(origin) == 0 { + if trace { + traceLogger.Print("no Http header Origin set") + } + chain.ProcessFilter(req, resp) + return + } + if !c.isOriginAllowed(origin) { // check whether this origin is allowed + if trace { + traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) + } + chain.ProcessFilter(req, resp) + return + } + if req.Request.Method != "OPTIONS" { + c.doActualRequest(req, resp) + chain.ProcessFilter(req, resp) + return + } + if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { + c.doPreflightRequest(req, resp) + } else { + c.doActualRequest(req, resp) + chain.ProcessFilter(req, resp) + return + } +} + +func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { + c.setOptionsHeaders(req, resp) + // continue processing the response +} + +func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { + if len(c.AllowedMethods) == 0 { + if c.Container == nil { + c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) + } else { + c.AllowedMethods = c.Container.computeAllowedMethods(req) + } + } + + acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) + if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { + if trace { + traceLogger.Printf("Http header %s:%s is not in %v", + HEADER_AccessControlRequestMethod, + acrm, + c.AllowedMethods) + } + 
return + } + acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) + if len(acrhs) > 0 { + for _, each := range strings.Split(acrhs, ",") { + if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { + if trace { + traceLogger.Printf("Http header %s:%s is not in %v", + HEADER_AccessControlRequestHeaders, + acrhs, + c.AllowedHeaders) + } + return + } + } + } + resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) + resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) + c.setOptionsHeaders(req, resp) + + // return http 200 response, no body +} + +func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { + c.checkAndSetExposeHeaders(resp) + c.setAllowOriginHeader(req, resp) + c.checkAndSetAllowCredentials(resp) + if c.MaxAge > 0 { + resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge)) + } +} + +func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { + if len(origin) == 0 { + return false + } + if len(c.AllowedDomains) == 0 { + return true + } + + allowed := false + for _, domain := range c.AllowedDomains { + if domain == origin { + allowed = true + break + } + } + + if !allowed { + if len(c.allowedOriginPatterns) == 0 { + // compile allowed domains to allowed origin patterns + allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) + if err != nil { + return false + } + c.allowedOriginPatterns = allowedOriginRegexps + } + + for _, pattern := range c.allowedOriginPatterns { + if allowed = pattern.MatchString(origin); allowed { + break + } + } + } + + return allowed +} + +func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { + origin := req.Request.Header.Get(HEADER_Origin) + if c.isOriginAllowed(origin) { + resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) + } +} + +func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) { + if len(c.ExposeHeaders) > 0 { + resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) + } +} + +func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { + if c.CookiesAllowed { + resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") + } +} + +func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { + for _, each := range allowedMethods { + if each == method { + return true + } + } + return false +} + +func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { + for _, each := range c.AllowedHeaders { + if strings.ToLower(each) == strings.ToLower(header) { + return true + } + } + return false +} + +// Take a list of strings and compile them into a list of regular expressions. 
+func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return regexps, err + } + regexps = append(regexps, r) + } + return regexps, nil +} diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/coverage.sh new file mode 100644 index 00000000..e27dbf1a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/coverage.sh @@ -0,0 +1,2 @@ +go test -coverprofile=coverage.out +go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go new file mode 100644 index 00000000..14d5b76b --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/curly.go @@ -0,0 +1,164 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "net/http" + "regexp" + "sort" + "strings" +) + +// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. +type CurlyRouter struct{} + +// SelectRoute is part of the Router interface and returns the best match +// for the WebService and its Route for the given Request. +func (c CurlyRouter) SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { + + requestTokens := tokenizePath(httpRequest.URL.Path) + + detectedService := c.detectWebService(requestTokens, webServices) + if detectedService == nil { + if trace { + traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) + } + return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + candidateRoutes := c.selectRoutes(detectedService, requestTokens) + if len(candidateRoutes) == 0 { + if trace { + traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) + } + return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) + if selectedRoute == nil { + return detectedService, nil, err + } + return detectedService, selectedRoute, nil +} + +// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. +func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { + candidates := make(sortableCurlyRoutes, 0, 8) + for _, each := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) + if matches { + candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + } + } + sort.Sort(candidates) + return candidates +} + +// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. 
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { + if len(routeTokens) < len(requestTokens) { + // proceed in matching only if last routeToken is wildcard + count := len(routeTokens) + if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { + return false, 0, 0 + } + // proceed + } + for i, routeToken := range routeTokens { + if i == len(requestTokens) { + // reached end of request path + return false, 0, 0 + } + requestToken := requestTokens[i] + if strings.HasPrefix(routeToken, "{") { + paramCount++ + if colon := strings.Index(routeToken, ":"); colon != -1 { + // match by regex + matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) + if !matchesToken { + return false, 0, 0 + } + if matchesRemainder { + break + } + } + } else { // no { prefix + if requestToken != routeToken { + return false, 0, 0 + } + staticCount++ + } + } + return true, paramCount, staticCount +} + +// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens +// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]} +func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { + regPart := routeToken[colon+1 : len(routeToken)-1] + if regPart == "*" { + if trace { + traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) + } + return true, true + } + matched, err := regexp.MatchString(regPart, requestToken) + return (matched && err == nil), false +} + +var jsr311Router = RouterJSR311{} + +// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type +// headers of the Request. See also RouterJSR311 in jsr311.go +func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { + // tracing is done inside detectRoute + return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest) +} + +// detectWebService returns the best matching webService given the list of path tokens. +// see also computeWebserviceScore +func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { + var best *WebService + score := -1 + for _, each := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + if matches && (eachScore > score) { + best = each + score = eachScore + } + } + return best +} + +// computeWebserviceScore returns whether tokens match and +// the weighted score of the longest matching consecutive tokens from the beginning. 
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { + if len(tokens) > len(requestTokens) { + return false, 0 + } + score := 0 + for i := 0; i < len(tokens); i++ { + each := requestTokens[i] + other := tokens[i] + if len(each) == 0 && len(other) == 0 { + score++ + continue + } + if len(other) > 0 && strings.HasPrefix(other, "{") { + // no empty match + if len(each) == 0 { + return false, score + } + score += 1 + } else { + // not a parameter + if each != other { + return false, score + } + score += (len(tokens) - i) * 10 //fuzzy + } + } + return true, score +} diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go new file mode 100644 index 00000000..403dd3be --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/curly_route.go @@ -0,0 +1,54 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. +type curlyRoute struct { + route Route + paramCount int + staticCount int +} + +// sortableCurlyRoutes orders by most parameters and path elements first. +type sortableCurlyRoutes []curlyRoute + +func (s *sortableCurlyRoutes) add(route curlyRoute) { + *s = append(*s, route) +} + +func (s sortableCurlyRoutes) routes() (routes []Route) { + routes = make([]Route, 0, len(s)) + for _, each := range s { + routes = append(routes, each.route) // TODO change return type + } + return routes +} + +func (s sortableCurlyRoutes) Len() int { + return len(s) +} +func (s sortableCurlyRoutes) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortableCurlyRoutes) Less(i, j int) bool { + a := s[j] + b := s[i] + + // primary key + if a.staticCount < b.staticCount { + return true + } + if a.staticCount > b.staticCount { + return false + } + // secundary key + if a.paramCount < b.paramCount { + return true + } + if a.paramCount > b.paramCount { + return false + } + return a.route.Path < b.route.Path +} diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go new file mode 100644 index 00000000..f7c16b01 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/doc.go @@ -0,0 +1,185 @@ +/* +Package restful , a lean package for creating REST-style WebServices without magic. + +WebServices and Routes + +A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. +Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. +WebServices must be added to a container (see below) in order to handler Http requests from a server. + +A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). +This package has the logic to find the best matching Route and if found, call its Function. + + ws := new(restful.WebService) + ws. + Path("/users"). + Consumes(restful.MIME_JSON, restful.MIME_XML). + Produces(restful.MIME_JSON, restful.MIME_XML) + + ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource + + ... + + // GET http://localhost:8080/users/1 + func (u UserResource) findUser(request *restful.Request, response *restful.Response) { + id := request.PathParameter("user-id") + ... 
+ } + +The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. + +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. + +Regular expression matching Routes + +A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. +For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. +Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) +This feature requires the use of a CurlyRouter. + +Containers + +A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. +Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. +The Default container of go-restful uses the http.DefaultServeMux. +You can create your own Container and create a new http.Server for that particular container. + + container := restful.NewContainer() + server := &http.Server{Addr: ":8081", Handler: container} + +Filters + +A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. +You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. +In the restful package there are three hooks into the request,response flow where filters can be added. +Each filter must define a FilterFunction: + + func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) + +Use the following statement to pass the request,response pair to the next filter or RouteFunction + + chain.ProcessFilter(req, resp) + +Container Filters + +These are processed before any registered WebService. + + // install a (global) filter for the default container (processed before any webservice) + restful.Filter(globalLogging) + +WebService Filters + +These are processed before any Route of a WebService. + + // install a webservice filter (processed before any route) + ws.Filter(webserviceLogging).Filter(measureTime) + + +Route Filters + +These are processed before calling the function associated with the Route. + + // install 2 chained route filters (processed before calling findUser) + ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) + +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. + +Response Encoding + +Two encodings are supported: gzip and deflate. To enable this for all responses: + + restful.DefaultContainer.EnableContentEncoding(true) + +If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. +Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. + +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go + +OPTIONS support + +By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. + + Filter(OPTIONSFilter()) + +CORS + +By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. 
+ + cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} + Filter(cors.Filter) + +Error Handling + +Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. +For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. + + 400: Bad Request + +If path or query parameters are not valid (content or type) then use http.StatusBadRequest. + + 404: Not Found + +Despite a valid URI, the resource requested may not be available + + 500: Internal Server Error + +If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. + + 405: Method Not Allowed + +The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. + + 406: Not Acceptable + +The request does not have or has an unknown Accept Header set for this operation. + + 415: Unsupported Media Type + +The request does not have or has an unknown Content-Type Header set for this operation. + +ServiceError + +In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. + +Performance options + +This package has several options that affect the performance of your service. It is important to understand them and how you can change it. + + restful.DefaultContainer.DoNotRecover(false) + +DoNotRecover controls whether panics will be caught to return HTTP 500. +If set to false, the container will recover from panics. +Default value is true + + restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) + +If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. +Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. + +Trouble shooting + +This package has the means to produce detail logging of the complete Http request matching process and filter invocation. +Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: + + restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) + +Logging + +The restful.SetLogger() method allows you to override the logger used by the package. By default restful +uses the standard library `log` package and logs to stdout. Different logging packages are supported as +long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your +preferred package is simple. + +Resources + +[project]: https://github.com/emicklei/go-restful + +[examples]: https://github.com/emicklei/go-restful/blob/master/examples + +[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ + +[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape + +(c) 2012-2015, http://ernestmicklei.com. MIT License +*/ +package restful diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go new file mode 100644 index 00000000..66dfc824 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go @@ -0,0 +1,162 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
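As a quick orientation to the API that the vendored doc.go above documents, the following minimal sketch (not part of the vendored sources) wires a WebService, a Container filter and an http.Server together, matching the patterns shown in that package documentation. The UserResource type, the /users path and the :8081 address are illustrative assumptions only:

    package main

    import (
        "log"
        "net/http"

        "github.com/emicklei/go-restful"
    )

    type UserResource struct{}

    // findUser echoes the path parameter back as a small JSON object.
    func (u UserResource) findUser(req *restful.Request, resp *restful.Response) {
        id := req.PathParameter("user-id")
        resp.WriteEntity(map[string]string{"id": id})
    }

    // logFilter is a container filter that logs every request before dispatch.
    func logFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
        log.Printf("%s %s", req.Request.Method, req.Request.URL)
        chain.ProcessFilter(req, resp)
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/users").
            Consumes(restful.MIME_JSON).
            Produces(restful.MIME_JSON)
        ws.Route(ws.GET("/{user-id}").To(UserResource{}.findUser))

        container := restful.NewContainer()
        container.Filter(logFilter)
        container.Add(ws)

        // A Container implements http.Handler, so it can back an http.Server directly.
        log.Fatal(http.ListenAndServe(":8081", container))
    }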
+ +import ( + "encoding/xml" + "strings" + "sync" +) + +// EntityReaderWriter can read and write values using an encoding such as JSON,XML. +type EntityReaderWriter interface { + // Read a serialized version of the value from the request. + // The Request may have a decompressing reader. Depends on Content-Encoding. + Read(req *Request, v interface{}) error + + // Write a serialized version of the value on the response. + // The Response may have a compressing writer. Depends on Accept-Encoding. + // status should be a valid Http Status code + Write(resp *Response, status int, v interface{}) error +} + +// entityAccessRegistry is a singleton +var entityAccessRegistry = &entityReaderWriters{ + protection: new(sync.RWMutex), + accessors: map[string]EntityReaderWriter{}, +} + +// entityReaderWriters associates MIME to an EntityReaderWriter +type entityReaderWriters struct { + protection *sync.RWMutex + accessors map[string]EntityReaderWriter +} + +func init() { + RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) + RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) +} + +// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. +func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { + entityAccessRegistry.protection.Lock() + defer entityAccessRegistry.protection.Unlock() + entityAccessRegistry.accessors[mime] = erw +} + +// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. +// This package is already initialized with such an accessor using the MIME_JSON contentType. +func NewEntityAccessorJSON(contentType string) EntityReaderWriter { + return entityJSONAccess{ContentType: contentType} +} + +// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. +// This package is already initialized with such an accessor using the MIME_XML contentType. +func NewEntityAccessorXML(contentType string) EntityReaderWriter { + return entityXMLAccess{ContentType: contentType} +} + +// accessorAt returns the registered ReaderWriter for this MIME type. +func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { + r.protection.RLock() + defer r.protection.RUnlock() + er, ok := r.accessors[mime] + if !ok { + // retry with reverse lookup + // more expensive but we are in an exceptional situation anyway + for k, v := range r.accessors { + if strings.Contains(mime, k) { + return v, true + } + } + } + return er, ok +} + +// entityXMLAccess is a EntityReaderWriter for XML encoding +type entityXMLAccess struct { + // This is used for setting the Content-Type header when writing + ContentType string +} + +// Read unmarshalls the value from XML +func (e entityXMLAccess) Read(req *Request, v interface{}) error { + return xml.NewDecoder(req.Request.Body).Decode(v) +} + +// Write marshalls the value to JSON and set the Content-Type Header. +func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { + return writeXML(resp, status, e.ContentType, v) +} + +// writeXML marshalls the value to JSON and set the Content-Type Header. 
+func writeXML(resp *Response, status int, contentType string, v interface{}) error { + if v == nil { + resp.WriteHeader(status) + // do not write a nil representation + return nil + } + if resp.prettyPrint { + // pretty output must be created and written explicitly + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + return err + } + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + _, err = resp.Write([]byte(xml.Header)) + if err != nil { + return err + } + _, err = resp.Write(output) + return err + } + // not-so-pretty + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + return xml.NewEncoder(resp).Encode(v) +} + +// entityJSONAccess is a EntityReaderWriter for JSON encoding +type entityJSONAccess struct { + // This is used for setting the Content-Type header when writing + ContentType string +} + +// Read unmarshalls the value from JSON +func (e entityJSONAccess) Read(req *Request, v interface{}) error { + decoder := NewDecoder(req.Request.Body) + decoder.UseNumber() + return decoder.Decode(v) +} + +// Write marshalls the value to JSON and set the Content-Type Header. +func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { + return writeJSON(resp, status, e.ContentType, v) +} + +// write marshalls the value to JSON and set the Content-Type Header. +func writeJSON(resp *Response, status int, contentType string, v interface{}) error { + if v == nil { + resp.WriteHeader(status) + // do not write a nil representation + return nil + } + if resp.prettyPrint { + // pretty output must be created and written explicitly + output, err := MarshalIndent(v, "", " ") + if err != nil { + return err + } + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + _, err = resp.Write(output) + return err + } + // not-so-pretty + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + return NewEncoder(resp).Encode(v) +} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go new file mode 100644 index 00000000..c23bfb59 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/filter.go @@ -0,0 +1,35 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. +type FilterChain struct { + Filters []FilterFunction // ordered list of FilterFunction + Index int // index into filters that is currently in progress + Target RouteFunction // function to call after passing all filters +} + +// ProcessFilter passes the request,response pair through the next of Filters. +// Each filter can decide to proceed to the next Filter or handle the Response itself. 
+func (f *FilterChain) ProcessFilter(request *Request, response *Response) { + if f.Index < len(f.Filters) { + f.Index++ + f.Filters[f.Index-1](request, response, f) + } else { + f.Target(request, response) + } +} + +// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction +type FilterFunction func(*Request, *Response, *FilterChain) + +// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching +// See examples/restful-no-cache-filter.go for usage +func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) { + resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. + resp.Header().Set("Pragma", "no-cache") // HTTP 1.0. + resp.Header().Set("Expires", "0") // Proxies. + chain.ProcessFilter(req, resp) +} diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/json.go new file mode 100644 index 00000000..87116516 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/json.go @@ -0,0 +1,11 @@ +// +build !jsoniter + +package restful + +import "encoding/json" + +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/jsoniter.go new file mode 100644 index 00000000..11b8f8ae --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/jsoniter.go @@ -0,0 +1,12 @@ +// +build jsoniter + +package restful + +import "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go new file mode 100644 index 00000000..3ede1891 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/jsr311.go @@ -0,0 +1,297 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "errors" + "fmt" + "net/http" + "sort" +) + +// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) +// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. +// RouterJSR311 implements the Router interface. +// Concept of locators is not implemented. +type RouterJSR311 struct{} + +// SelectRoute is part of the Router interface and returns the best match +// for the WebService and its Route for the given Request. 
+func (r RouterJSR311) SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { + + // Identify the root resource class (WebService) + dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) + if err != nil { + return nil, nil, NewError(http.StatusNotFound, "") + } + // Obtain the set of candidate methods (Routes) + routes := r.selectRoutes(dispatcher, finalMatch) + if len(routes) == 0 { + return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + + // Identify the method (Route) that will handle the request + route, ok := r.detectRoute(routes, httpRequest) + return dispatcher, route, ok +} + +// ExtractParameters is used to obtain the path parameters from the route using the same matching +// engine as the JSR 311 router. +func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string { + webServiceExpr := webService.pathExpr + webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath) + pathParameters := r.extractParams(webServiceExpr, webServiceMatches) + routeExpr := route.pathExpr + routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1]) + routeParams := r.extractParams(routeExpr, routeMatches) + for key, value := range routeParams { + pathParameters[key] = value + } + return pathParameters +} + +func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string { + params := map[string]string{} + for i := 1; i < len(matches); i++ { + if len(pathExpr.VarNames) >= i { + params[pathExpr.VarNames[i-1]] = matches[i] + } + } + return params +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { + candidates := make([]*Route, 0, 8) + for i, each := range routes { + ok := true + for _, fn := range each.If { + if !fn(httpRequest) { + ok = false + break + } + } + if ok { + candidates = append(candidates, &routes[i]) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) + } + return nil, NewError(http.StatusNotFound, "404: Not Found") + } + + // http method + previous := candidates + candidates = candidates[:0] + for _, each := range previous { + if httpRequest.Method == each.Method { + candidates = append(candidates, each) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) + } + return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") + } + + // content-type + contentType := httpRequest.Header.Get(HEADER_ContentType) + previous = candidates + candidates = candidates[:0] + for _, each := range previous { + if each.matchesContentType(contentType) { + candidates = append(candidates, each) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) + } + if httpRequest.ContentLength > 0 { + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") + } + } + + // accept + previous = candidates + candidates = candidates[:0] + accept := httpRequest.Header.Get(HEADER_Accept) + if len(accept) == 0 { + accept = "*/*" + } + for _, each := range previous { + if 
each.matchesAccept(accept) { + candidates = append(candidates, each) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) + } + return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") + } + // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil + return candidates[0], nil +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// n/m > n/* > */* +func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { + // TODO + return &routes[0] +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) +func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { + filtered := &sortableRouteCandidates{} + for _, each := range dispatcher.Routes() { + pathExpr := each.pathExpr + matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) + if matches != nil { + lastMatch := matches[len(matches)-1] + if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. + filtered.candidates = append(filtered.candidates, + routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) + } + } + } + if len(filtered.candidates) == 0 { + if trace { + traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) + } + return []Route{} + } + sort.Sort(sort.Reverse(filtered)) + + // select other routes from candidates whoes expression matches rmatch + matchingRoutes := []Route{filtered.candidates[0].route} + for c := 1; c < len(filtered.candidates); c++ { + each := filtered.candidates[c] + if each.route.pathExpr.Matcher.MatchString(pathRemainder) { + matchingRoutes = append(matchingRoutes, each.route) + } + } + return matchingRoutes +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) +func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { + filtered := &sortableDispatcherCandidates{} + for _, each := range dispatchers { + matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) + if matches != nil { + filtered.candidates = append(filtered.candidates, + dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) + } + } + if len(filtered.candidates) == 0 { + if trace { + traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) + } + return nil, "", errors.New("not found") + } + sort.Sort(sort.Reverse(filtered)) + return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil +} + +// Types and functions to support the sorting of Routes + +type routeCandidate struct { + route Route + matchesCount int // the number of capturing groups + literalCount int // the number of literal characters (means those not resulting from template variable substitution) + nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) +} + +func (r routeCandidate) expressionToMatch() string { + return r.route.pathExpr.Source +} + +func (r routeCandidate) String() string { + return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) +} + +type sortableRouteCandidates struct { + candidates []routeCandidate +} + +func (rcs *sortableRouteCandidates) Len() int { + return len(rcs.candidates) +} +func (rcs *sortableRouteCandidates) Swap(i, j int) { + rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] +} +func (rcs *sortableRouteCandidates) Less(i, j int) bool { + ci := rcs.candidates[i] + cj := rcs.candidates[j] + // primary key + if ci.literalCount < cj.literalCount { + return true + } + if ci.literalCount > cj.literalCount { + return false + } + // secundary key + if ci.matchesCount < cj.matchesCount { + return true + } + if ci.matchesCount > cj.matchesCount { + return false + } + // tertiary key + if ci.nonDefaultCount < cj.nonDefaultCount { + return true + } + if ci.nonDefaultCount > cj.nonDefaultCount { + return false + } + // quaternary key ("source" is interpreted as Path) + return ci.route.Path < cj.route.Path +} + +// Types and functions to support the sorting of Dispatchers + +type dispatcherCandidate struct { + dispatcher *WebService + finalMatch string + matchesCount int // the number of capturing groups + literalCount int // the number of literal characters (means those not resulting from template variable substitution) + nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’) +} +type sortableDispatcherCandidates struct { + candidates []dispatcherCandidate +} + +func (dc *sortableDispatcherCandidates) Len() int { + return len(dc.candidates) +} +func (dc *sortableDispatcherCandidates) Swap(i, j int) { + dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] +} +func (dc *sortableDispatcherCandidates) Less(i, j int) bool { + ci := dc.candidates[i] + cj := dc.candidates[j] + // primary key + if ci.matchesCount < cj.matchesCount { + return true + } + if ci.matchesCount > cj.matchesCount { + return false + } + // secundary key + if ci.literalCount < cj.literalCount { + return true + } + if ci.literalCount > cj.literalCount { + return false + } + // tertiary key + return ci.nonDefaultCount < cj.nonDefaultCount +} diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go new file mode 100644 index 00000000..6cd44c7a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/log/log.go @@ -0,0 +1,34 @@ +package log + +import ( + stdlog "log" + "os" +) + +// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger +type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) +} + +var Logger StdLogger + +func init() { + // default Logger + SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) +} + +// SetLogger sets the logger for this package +func SetLogger(customLogger StdLogger) { + Logger = customLogger +} + +// Print delegates to the Logger +func Print(v ...interface{}) { + Logger.Print(v...) +} + +// Printf delegates to the Logger +func Printf(format string, v ...interface{}) { + Logger.Printf(format, v...) 
+} diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go new file mode 100644 index 00000000..6595df00 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/logger.go @@ -0,0 +1,32 @@ +package restful + +// Copyright 2014 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. +import ( + "github.com/emicklei/go-restful/log" +) + +var trace bool = false +var traceLogger log.StdLogger + +func init() { + traceLogger = log.Logger // use the package logger by default +} + +// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. +// You may call EnableTracing() directly to enable trace logging to the package-wide logger. +func TraceLogger(logger log.StdLogger) { + traceLogger = logger + EnableTracing(logger != nil) +} + +// SetLogger exposes the setter for the global logger on the top-level package +func SetLogger(customLogger log.StdLogger) { + log.SetLogger(customLogger) +} + +// EnableTracing can be used to Trace logging on and off. +func EnableTracing(enabled bool) { + trace = enabled +} diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go new file mode 100644 index 00000000..33014471 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/mime.go @@ -0,0 +1,50 @@ +package restful + +import ( + "strconv" + "strings" +) + +type mime struct { + media string + quality float64 +} + +// insertMime adds a mime to a list and keeps it sorted by quality. +func insertMime(l []mime, e mime) []mime { + for i, each := range l { + // if current mime has lower quality then insert before + if e.quality > each.quality { + left := append([]mime{}, l[0:i]...) + return append(append(left, e), l[i:]...) + } + } + return append(l, e) +} + +const qFactorWeightingKey = "q" + +// sortedMimes returns a list of mime sorted (desc) by its specified quality. +// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3 +func sortedMimes(accept string) (sorted []mime) { + for _, each := range strings.Split(accept, ",") { + typeAndQuality := strings.Split(strings.Trim(each, " "), ";") + if len(typeAndQuality) == 1 { + sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) + } else { + // take factor + qAndWeight := strings.Split(typeAndQuality[1], "=") + if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey { + f, err := strconv.ParseFloat(qAndWeight[1], 64) + if err != nil { + traceLogger.Printf("unable to parse quality in %s, %v", each, err) + } else { + sorted = insertMime(sorted, mime{typeAndQuality[0], f}) + } + } else { + sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) + } + } + } + return +} diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go new file mode 100644 index 00000000..5c1b3425 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/options_filter.go @@ -0,0 +1,34 @@ +package restful + +import "strings" + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method +// and provides the response with a set of allowed methods for the request URL Path. 
+// As for any filter, you can also install it for a particular WebService within a Container. +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). +func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { + if "OPTIONS" != req.Request.Method { + chain.ProcessFilter(req, resp) + return + } + + archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) + methods := strings.Join(c.computeAllowedMethods(req), ",") + origin := req.Request.Header.Get(HEADER_Origin) + + resp.AddHeader(HEADER_Allow, methods) + resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) + resp.AddHeader(HEADER_AccessControlAllowHeaders, archs) + resp.AddHeader(HEADER_AccessControlAllowMethods, methods) +} + +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method +// and provides the response with a set of allowed methods for the request URL Path. +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). +func OPTIONSFilter() FilterFunction { + return DefaultContainer.OPTIONSFilter +} diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go new file mode 100644 index 00000000..e8793304 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/parameter.go @@ -0,0 +1,143 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +const ( + // PathParameterKind = indicator of Request parameter type "path" + PathParameterKind = iota + + // QueryParameterKind = indicator of Request parameter type "query" + QueryParameterKind + + // BodyParameterKind = indicator of Request parameter type "body" + BodyParameterKind + + // HeaderParameterKind = indicator of Request parameter type "header" + HeaderParameterKind + + // FormParameterKind = indicator of Request parameter type "form" + FormParameterKind + + // CollectionFormatCSV comma separated values `foo,bar` + CollectionFormatCSV = CollectionFormat("csv") + + // CollectionFormatSSV space separated values `foo bar` + CollectionFormatSSV = CollectionFormat("ssv") + + // CollectionFormatTSV tab separated values `foo\tbar` + CollectionFormatTSV = CollectionFormat("tsv") + + // CollectionFormatPipes pipe separated values `foo|bar` + CollectionFormatPipes = CollectionFormat("pipes") + + // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single + // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters + CollectionFormatMulti = CollectionFormat("multi") +) + +type CollectionFormat string + +func (cf CollectionFormat) String() string { + return string(cf) +} + +// Parameter is for documententing the parameter used in a Http Request +// ParameterData kinds are Path,Query and Body +type Parameter struct { + data *ParameterData +} + +// ParameterData represents the state of a Parameter. +// It is made public to make it accessible to e.g. the Swagger package. 
+type ParameterData struct { + Name, Description, DataType, DataFormat string + Kind int + Required bool + AllowableValues map[string]string + AllowMultiple bool + DefaultValue string + CollectionFormat string +} + +// Data returns the state of the Parameter +func (p *Parameter) Data() ParameterData { + return *p.data +} + +// Kind returns the parameter type indicator (see const for valid values) +func (p *Parameter) Kind() int { + return p.data.Kind +} + +func (p *Parameter) bePath() *Parameter { + p.data.Kind = PathParameterKind + return p +} +func (p *Parameter) beQuery() *Parameter { + p.data.Kind = QueryParameterKind + return p +} +func (p *Parameter) beBody() *Parameter { + p.data.Kind = BodyParameterKind + return p +} + +func (p *Parameter) beHeader() *Parameter { + p.data.Kind = HeaderParameterKind + return p +} + +func (p *Parameter) beForm() *Parameter { + p.data.Kind = FormParameterKind + return p +} + +// Required sets the required field and returns the receiver +func (p *Parameter) Required(required bool) *Parameter { + p.data.Required = required + return p +} + +// AllowMultiple sets the allowMultiple field and returns the receiver +func (p *Parameter) AllowMultiple(multiple bool) *Parameter { + p.data.AllowMultiple = multiple + return p +} + +// AllowableValues sets the allowableValues field and returns the receiver +func (p *Parameter) AllowableValues(values map[string]string) *Parameter { + p.data.AllowableValues = values + return p +} + +// DataType sets the dataType field and returns the receiver +func (p *Parameter) DataType(typeName string) *Parameter { + p.data.DataType = typeName + return p +} + +// DataFormat sets the dataFormat field for Swagger UI +func (p *Parameter) DataFormat(formatName string) *Parameter { + p.data.DataFormat = formatName + return p +} + +// DefaultValue sets the default value field and returns the receiver +func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { + p.data.DefaultValue = stringRepresentation + return p +} + +// Description sets the description value field and returns the receiver +func (p *Parameter) Description(doc string) *Parameter { + p.data.Description = doc + return p +} + +// CollectionFormat sets the collection format for an array type +func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { + p.data.CollectionFormat = format.String() + return p +} diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go new file mode 100644 index 00000000..95a9a254 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/path_expression.go @@ -0,0 +1,74 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "fmt" + "regexp" + "strings" +) + +// PathExpression holds a compiled path expression (RegExp) needed to match against +// Http request paths and to extract path parameter values. +type pathExpression struct { + LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) + VarNames []string // the names of parameters (enclosed by {}) in the path + VarCount int // the number of named parameters (enclosed by {}) in the path + Matcher *regexp.Regexp + Source string // Path as defined by the RouteBuilder + tokens []string +} + +// NewPathExpression creates a PathExpression from the input URL path. 
+// Returns an error if the path is invalid. +func newPathExpression(path string) (*pathExpression, error) { + expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path) + compiled, err := regexp.Compile(expression) + if err != nil { + return nil, err + } + return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 +func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { + var buffer bytes.Buffer + buffer.WriteString("^") + //tokens = strings.Split(template, "/") + tokens = tokenizePath(template) + for _, each := range tokens { + if each == "" { + continue + } + buffer.WriteString("/") + if strings.HasPrefix(each, "{") { + // check for regular expression in variable + colon := strings.Index(each, ":") + var varName string + if colon != -1 { + // extract expression + varName = strings.TrimSpace(each[1:colon]) + paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) + if paramExpr == "*" { // special case + buffer.WriteString("(.*)") + } else { + buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache + } + } else { + // plain var + varName = strings.TrimSpace(each[1 : len(each)-1]) + buffer.WriteString("([^/]+?)") + } + varNames = append(varNames, varName) + varCount += 1 + } else { + literalCount += len(each) + encoded := each // TODO URI encode + buffer.WriteString(regexp.QuoteMeta(encoded)) + } + } + return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens +} diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/path_processor.go new file mode 100644 index 00000000..357c723a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/path_processor.go @@ -0,0 +1,63 @@ +package restful + +import ( + "bytes" + "strings" +) + +// Copyright 2018 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. +// If a Router does not implement this interface then the default behaviour will be used. 
+type PathProcessor interface { + // ExtractParameters gets the path parameters defined in the route and webService from the urlPath + ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string +} + +type defaultPathProcessor struct{} + +// Extract the parameters from the request url path +func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string { + urlParts := tokenizePath(urlPath) + pathParameters := map[string]string{} + for i, key := range r.pathParts { + var value string + if i >= len(urlParts) { + value = "" + } else { + value = urlParts[i] + } + if strings.HasPrefix(key, "{") { // path-parameter + if colon := strings.Index(key, ":"); colon != -1 { + // extract by regex + regPart := key[colon+1 : len(key)-1] + keyPart := key[1:colon] + if regPart == "*" { + pathParameters[keyPart] = untokenizePath(i, urlParts) + break + } else { + pathParameters[keyPart] = value + } + } else { + // without enclosing {} + pathParameters[key[1:len(key)-1]] = value + } + } + } + return pathParameters +} + +// Untokenize back into an URL path using the slash separator +func untokenizePath(offset int, parts []string) string { + var buffer bytes.Buffer + for p := offset; p < len(parts); p++ { + buffer.WriteString(parts[p]) + // do not end + if p < len(parts)-1 { + buffer.WriteString("/") + } + } + return buffer.String() +} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go new file mode 100644 index 00000000..a20730fe --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/request.go @@ -0,0 +1,118 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/zlib" + "net/http" +) + +var defaultRequestContentType string + +// Request is a wrapper for a http Request that provides convenience methods +type Request struct { + Request *http.Request + pathParameters map[string]string + attributes map[string]interface{} // for storing request-scoped values + selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees +} + +func NewRequest(httpRequest *http.Request) *Request { + return &Request{ + Request: httpRequest, + pathParameters: map[string]string{}, + attributes: map[string]interface{}{}, + } // empty parameters, attributes +} + +// If ContentType is missing or */* is given then fall back to this type, otherwise +// a "Unable to unmarshal content of type:" response is returned. 
+// Valid values are restful.MIME_JSON and restful.MIME_XML +// Example: +// restful.DefaultRequestContentType(restful.MIME_JSON) +func DefaultRequestContentType(mime string) { + defaultRequestContentType = mime +} + +// PathParameter accesses the Path parameter value by its name +func (r *Request) PathParameter(name string) string { + return r.pathParameters[name] +} + +// PathParameters accesses the Path parameter values +func (r *Request) PathParameters() map[string]string { + return r.pathParameters +} + +// QueryParameter returns the (first) Query parameter value by its name +func (r *Request) QueryParameter(name string) string { + return r.Request.FormValue(name) +} + +// QueryParameters returns the all the query parameters values by name +func (r *Request) QueryParameters(name string) []string { + return r.Request.URL.Query()[name] +} + +// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. +func (r *Request) BodyParameter(name string) (string, error) { + err := r.Request.ParseForm() + if err != nil { + return "", err + } + return r.Request.PostFormValue(name), nil +} + +// HeaderParameter returns the HTTP Header value of a Header name or empty if missing +func (r *Request) HeaderParameter(name string) string { + return r.Request.Header.Get(name) +} + +// ReadEntity checks the Accept header and reads the content into the entityPointer. +func (r *Request) ReadEntity(entityPointer interface{}) (err error) { + contentType := r.Request.Header.Get(HEADER_ContentType) + contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) + + // check if the request body needs decompression + if ENCODING_GZIP == contentEncoding { + gzipReader := currentCompressorProvider.AcquireGzipReader() + defer currentCompressorProvider.ReleaseGzipReader(gzipReader) + gzipReader.Reset(r.Request.Body) + r.Request.Body = gzipReader + } else if ENCODING_DEFLATE == contentEncoding { + zlibReader, err := zlib.NewReader(r.Request.Body) + if err != nil { + return err + } + r.Request.Body = zlibReader + } + + // lookup the EntityReader, use defaultRequestContentType if needed and provided + entityReader, ok := entityAccessRegistry.accessorAt(contentType) + if !ok { + if len(defaultRequestContentType) != 0 { + entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType) + } + if !ok { + return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) + } + } + return entityReader.Read(r, entityPointer) +} + +// SetAttribute adds or replaces the attribute with the given value. +func (r *Request) SetAttribute(name string, value interface{}) { + r.attributes[name] = value +} + +// Attribute returns the value associated to the given name. Returns nil if absent. +func (r Request) Attribute(name string) interface{} { + return r.attributes[name] +} + +// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees +func (r Request) SelectedRoutePath() string { + return r.selectedRoutePath +} diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go new file mode 100644 index 00000000..fbb48f2d --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/response.go @@ -0,0 +1,255 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
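The request.go file above wraps the reading side (path, query and header parameters, ReadEntity) and response.go below wraps the writing side. A hedged usage sketch, not part of the vendored sources, of how a route function typically pairs the two; the User payload type and the /users route are illustrative assumptions:

    package main

    import (
        "log"
        "net/http"

        "github.com/emicklei/go-restful"
    )

    // User is a hypothetical payload struct, not something defined by go-restful.
    type User struct {
        ID   string `json:"id"`
        Name string `json:"name"`
    }

    // createUser reads the request body into a User and writes it back with a 201.
    func createUser(req *restful.Request, resp *restful.Response) {
        usr := new(User)
        // ReadEntity selects a decoder from the Content-Type header (JSON or XML by default)
        // and transparently handles gzip/deflate Content-Encoding.
        if err := req.ReadEntity(usr); err != nil {
            resp.WriteError(http.StatusBadRequest, err)
            return
        }
        // WriteHeaderAndEntity picks an encoder from the Accept header and the Route's Produces list.
        resp.WriteHeaderAndEntity(http.StatusCreated, usr)
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/users").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
        ws.Route(ws.POST("").To(createUser))
        restful.Add(ws) // registers on the DefaultContainer, which uses http.DefaultServeMux
        log.Fatal(http.ListenAndServe(":8080", nil))
    }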
+ +import ( + "bufio" + "errors" + "net" + "net/http" +) + +// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime) +var DefaultResponseMimeType string + +//PrettyPrintResponses controls the indentation feature of XML and JSON serialization +var PrettyPrintResponses = true + +// Response is a wrapper on the actual http ResponseWriter +// It provides several convenience methods to prepare and write response content. +type Response struct { + http.ResponseWriter + requestAccept string // mime-type what the Http Request says it wants to receive + routeProduces []string // mime-types what the Route says it can produce + statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) + contentLength int // number of bytes written for the response body + prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. + err error // err property is kept when WriteError is called + hijacker http.Hijacker // if underlying ResponseWriter supports it +} + +// NewResponse creates a new response based on a http ResponseWriter. +func NewResponse(httpWriter http.ResponseWriter) *Response { + hijacker, _ := httpWriter.(http.Hijacker) + return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker} +} + +// DefaultResponseContentType set a default. +// If Accept header matching fails, fall back to this type. +// Valid values are restful.MIME_JSON and restful.MIME_XML +// Example: +// restful.DefaultResponseContentType(restful.MIME_JSON) +func DefaultResponseContentType(mime string) { + DefaultResponseMimeType = mime +} + +// InternalServerError writes the StatusInternalServerError header. +// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) +func (r Response) InternalServerError() Response { + r.WriteHeader(http.StatusInternalServerError) + return r +} + +// Hijack implements the http.Hijacker interface. This expands +// the Response to fulfill http.Hijacker if the underlying +// http.ResponseWriter supports it. +func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if r.hijacker == nil { + return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter") + } + return r.hijacker.Hijack() +} + +// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. +func (r *Response) PrettyPrint(bePretty bool) { + r.prettyPrint = bePretty +} + +// AddHeader is a shortcut for .Header().Add(header,value) +func (r Response) AddHeader(header string, value string) Response { + r.Header().Add(header, value) + return r +} + +// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. +func (r *Response) SetRequestAccepts(mime string) { + r.requestAccept = mime +} + +// EntityWriter returns the registered EntityWriter that the entity (requested resource) +// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. +// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. 
+func (r *Response) EntityWriter() (EntityReaderWriter, bool) { + sorted := sortedMimes(r.requestAccept) + for _, eachAccept := range sorted { + for _, eachProduce := range r.routeProduces { + if eachProduce == eachAccept.media { + if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { + return w, true + } + } + } + if eachAccept.media == "*/*" { + for _, each := range r.routeProduces { + if w, ok := entityAccessRegistry.accessorAt(each); ok { + return w, true + } + } + } + } + // if requestAccept is empty + writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) + if !ok { + // if not registered then fallback to the defaults (if set) + if DefaultResponseMimeType == MIME_JSON { + return entityAccessRegistry.accessorAt(MIME_JSON) + } + if DefaultResponseMimeType == MIME_XML { + return entityAccessRegistry.accessorAt(MIME_XML) + } + // Fallback to whatever the route says it can produce. + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + for _, each := range r.routeProduces { + if w, ok := entityAccessRegistry.accessorAt(each); ok { + return w, true + } + } + if trace { + traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) + } + } + return writer, ok +} + +// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) +func (r *Response) WriteEntity(value interface{}) error { + return r.WriteHeaderAndEntity(http.StatusOK, value) +} + +// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. +// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. +// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. +// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. +// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. +// Current implementation ignores any q-parameters in the Accept Header. +// Returns an error if the value could not be written on the response. +func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { + writer, ok := r.EntityWriter() + if !ok { + r.WriteHeader(http.StatusNotAcceptable) + return nil + } + return writer.Write(r, status, value) +} + +// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteAsXml(value interface{}) error { + return writeXML(r, http.StatusOK, MIME_XML, value) +} + +// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { + return writeXML(r, status, MIME_XML, value) +} + +// WriteAsJson is a convenience method for writing a value in json. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. 
+func (r *Response) WriteAsJson(value interface{}) error { + return writeJSON(r, http.StatusOK, MIME_JSON, value) +} + +// WriteJson is a convenience method for writing a value in Json with a given Content-Type. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteJson(value interface{}, contentType string) error { + return writeJSON(r, http.StatusOK, contentType, value) +} + +// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { + return writeJSON(r, status, contentType, value) +} + +// WriteError write the http status and the error string on the response. err can be nil. +func (r *Response) WriteError(httpStatus int, err error) error { + r.err = err + if err == nil { + r.WriteErrorString(httpStatus, "") + } else { + r.WriteErrorString(httpStatus, err.Error()) + } + return err +} + +// WriteServiceError is a convenience method for a responding with a status and a ServiceError +func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { + r.err = err + return r.WriteHeaderAndEntity(httpStatus, err) +} + +// WriteErrorString is a convenience method for an error status with the actual error +func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { + if r.err == nil { + // if not called from WriteError + r.err = errors.New(errorReason) + } + r.WriteHeader(httpStatus) + if _, err := r.Write([]byte(errorReason)); err != nil { + return err + } + return nil +} + +// Flush implements http.Flusher interface, which sends any buffered data to the client. +func (r *Response) Flush() { + if f, ok := r.ResponseWriter.(http.Flusher); ok { + f.Flush() + } else if trace { + traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) + } +} + +// WriteHeader is overridden to remember the Status Code that has been written. +// Changes to the Header of the response have no effect after this. +func (r *Response) WriteHeader(httpStatus int) { + r.statusCode = httpStatus + r.ResponseWriter.WriteHeader(httpStatus) +} + +// StatusCode returns the code that has been written using WriteHeader. +func (r Response) StatusCode() int { + if 0 == r.statusCode { + // no status code has been written yet; assume OK + return http.StatusOK + } + return r.statusCode +} + +// Write writes the data to the connection as part of an HTTP reply. +// Write is part of http.ResponseWriter interface. +func (r *Response) Write(bytes []byte) (int, error) { + written, err := r.ResponseWriter.Write(bytes) + r.contentLength += written + return written, err +} + +// ContentLength returns the number of bytes written for the response content. +// Note that this value is only correct if all data is written through the Response using its Write* methods. +// Data written directly using the underlying http.ResponseWriter is not accounted for. 
+func (r Response) ContentLength() int { + return r.contentLength +} + +// CloseNotify is part of http.CloseNotifier interface +func (r Response) CloseNotify() <-chan bool { + return r.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// Error returns the err created by WriteError +func (r Response) Error() error { + return r.err +} diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go new file mode 100644 index 00000000..6d15dbf6 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -0,0 +1,170 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "net/http" + "strings" +) + +// RouteFunction declares the signature of a function that can be bound to a Route. +type RouteFunction func(*Request, *Response) + +// RouteSelectionConditionFunction declares the signature of a function that +// can be used to add extra conditional logic when selecting whether the route +// matches the HTTP request. +type RouteSelectionConditionFunction func(httpRequest *http.Request) bool + +// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. +type Route struct { + Method string + Produces []string + Consumes []string + Path string // webservice root path + described path + Function RouteFunction + Filters []FilterFunction + If []RouteSelectionConditionFunction + + // cached values for dispatching + relativePath string + pathParts []string + pathExpr *pathExpression // cached compilation of relativePath as RegExp + + // documentation + Doc string + Notes string + Operation string + ParameterDocs []*Parameter + ResponseErrors map[int]ResponseError + DefaultResponse *ResponseError + ReadSample, WriteSample interface{} // structs that model an example request or response payload + + // Extra information used to store custom information about the route. + Metadata map[string]interface{} + + // marks a route as deprecated + Deprecated bool + + //Overrides the container.contentEncodingEnabled + contentEncodingEnabled *bool +} + +// Initialize for Route +func (r *Route) postBuild() { + r.pathParts = tokenizePath(r.Path) +} + +// Create Request and Response from their http versions +func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { + wrappedRequest := NewRequest(httpRequest) + wrappedRequest.pathParameters = pathParams + wrappedRequest.selectedRoutePath = r.Path + wrappedResponse := NewResponse(httpWriter) + wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) + wrappedResponse.routeProduces = r.Produces + return wrappedRequest, wrappedResponse +} + +// dispatchWithFilters call the function after passing through its own filters +func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { + if len(r.Filters) > 0 { + chain := FilterChain{Filters: r.Filters, Target: r.Function} + chain.ProcessFilter(wrappedRequest, wrappedResponse) + } else { + // unfiltered + r.Function(wrappedRequest, wrappedResponse) + } +} + +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +// Return whether the mimeType matches to what this Route can produce. 
+func (r Route) matchesAccept(mimeTypesWithQuality string) bool { + remaining := mimeTypesWithQuality + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" + } else { + mimeType, remaining = remaining[:end], remaining[end+1:] + } + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] + } + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) + if mimeType == "*/*" { + return true + } + for _, producibleType := range r.Produces { + if producibleType == "*/*" || producibleType == mimeType { + return true + } + } + if len(remaining) == 0 { + return false + } + } +} + +// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). +func (r Route) matchesContentType(mimeTypes string) bool { + + if len(r.Consumes) == 0 { + // did not specify what it can consume ; any media type (“*/*”) is assumed + return true + } + + if len(mimeTypes) == 0 { + // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type + m := r.Method + if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { + return true + } + // proceed with default + mimeTypes = MIME_OCTET + } + + remaining := mimeTypes + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" + } else { + mimeType, remaining = remaining[:end], remaining[end+1:] + } + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] + } + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) + for _, consumeableType := range r.Consumes { + if consumeableType == "*/*" || consumeableType == mimeType { + return true + } + } + if len(remaining) == 0 { + return false + } + } +} + +// Tokenize an URL path using the slash separator ; the result does not have empty tokens +func tokenizePath(path string) []string { + if "/" == path { + return nil + } + return strings.Split(strings.Trim(path, "/"), "/") +} + +// for debugging +func (r Route) String() string { + return r.Method + " " + r.Path +} + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. +func (r Route) EnableContentEncoding(enabled bool) { + r.contentEncodingEnabled = &enabled +} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go new file mode 100644 index 00000000..0fccf61e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/route_builder.go @@ -0,0 +1,326 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync/atomic" + + "github.com/emicklei/go-restful/log" +) + +// RouteBuilder is a helper to construct Routes. 
+type RouteBuilder struct { + rootPath string + currentPath string + produces []string + consumes []string + httpMethod string // required + function RouteFunction // required + filters []FilterFunction + conditions []RouteSelectionConditionFunction + + typeNameHandleFunc TypeNameHandleFunction // required + + // documentation + doc string + notes string + operation string + readSample, writeSample interface{} + parameters []*Parameter + errorMap map[int]ResponseError + defaultResponse *ResponseError + metadata map[string]interface{} + deprecated bool + contentEncodingEnabled *bool +} + +// Do evaluates each argument with the RouteBuilder itself. +// This allows you to follow DRY principles without breaking the fluent programming style. +// Example: +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } +func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { + for _, each := range oneArgBlocks { + each(b) + } + return b +} + +// To bind the route to a function. +// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. +func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { + b.function = function + return b +} + +// Method specifies what HTTP method to match. Required. +func (b *RouteBuilder) Method(method string) *RouteBuilder { + b.httpMethod = method + return b +} + +// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. +func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { + b.produces = mimeTypes + return b +} + +// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these +func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { + b.consumes = mimeTypes + return b +} + +// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". +func (b *RouteBuilder) Path(subPath string) *RouteBuilder { + b.currentPath = subPath + return b +} + +// Doc tells what this route is all about. Optional. +func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { + b.doc = documentation + return b +} + +// Notes is a verbose explanation of the operation behavior. Optional. +func (b *RouteBuilder) Notes(notes string) *RouteBuilder { + b.notes = notes + return b +} + +// Reads tells what resource type will be read from the request payload. Optional. +// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. +func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder { + fn := b.typeNameHandleFunc + if fn == nil { + fn = reflectTypeName + } + typeAsName := fn(sample) + description := "" + if len(optionalDescription) > 0 { + description = optionalDescription[0] + } + b.readSample = sample + bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}} + bodyParameter.beBody() + bodyParameter.Required(true) + bodyParameter.DataType(typeAsName) + b.Param(bodyParameter) + return b +} + +// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. +// Use this to modify or extend information for the Parameter (through its Data()). 
+func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { + for _, each := range b.parameters { + if each.Data().Name == name { + return each + } + } + return p +} + +// Writes tells what resource type will be written as the response payload. Optional. +func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { + b.writeSample = sample + return b +} + +// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). +func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { + if b.parameters == nil { + b.parameters = []*Parameter{} + } + b.parameters = append(b.parameters, parameter) + return b +} + +// Operation allows you to document what the actual method/function call is of the Route. +// Unless called, the operation name is derived from the RouteFunction set using To(..). +func (b *RouteBuilder) Operation(name string) *RouteBuilder { + b.operation = name + return b +} + +// ReturnsError is deprecated, use Returns instead. +func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { + log.Print("ReturnsError is deprecated, use Returns instead.") + return b.Returns(code, message, model) +} + +// Returns allows you to document what responses (errors or regular) can be expected. +// The model parameter is optional ; either pass a struct instance or use nil if not applicable. +func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { + err := ResponseError{ + Code: code, + Message: message, + Model: model, + IsDefault: false, // this field is deprecated, use default response instead. + } + // lazy init because there is no NewRouteBuilder (yet) + if b.errorMap == nil { + b.errorMap = map[int]ResponseError{} + } + b.errorMap[code] = err + return b +} + +// DefaultReturns is a special Returns call that sets the default of the response. +func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { + b.defaultResponse = &ResponseError{ + Message: message, + Model: model, + } + return b +} + +// Metadata adds or updates a key=value pair to the metadata map. +func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { + if b.metadata == nil { + b.metadata = map[string]interface{}{} + } + b.metadata[key] = value + return b +} + +// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use +func (b *RouteBuilder) Deprecate() *RouteBuilder { + b.deprecated = true + return b +} + +// ResponseError represents a response; not necessarily an error. +type ResponseError struct { + Code int + Message string + Model interface{} + IsDefault bool +} + +func (b *RouteBuilder) servicePath(path string) *RouteBuilder { + b.rootPath = path + return b +} + +// Filter appends a FilterFunction to the end of filters for this Route to build. +func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { + b.filters = append(b.filters, filter) + return b +} + +// If sets a condition function that controls matching the Route based on custom logic. +// The condition function is provided the HTTP request and should return true if the route +// should be considered. +// +// Efficiency note: the condition function is called before checking the method, produces, and +// consumes criteria, so that the correct HTTP status code can be returned. 
+// +// Lifecycle note: no filter functions have been called prior to calling the condition function, +// so the condition function should not depend on any context that might be set up by container +// or route filters. +func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder { + b.conditions = append(b.conditions, condition) + return b +} + +// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response. +func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder { + b.contentEncodingEnabled = &enabled + return b +} + +// If no specific Route path then set to rootPath +// If no specific Produces then set to rootProduces +// If no specific Consumes then set to rootConsumes +func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { + if len(b.produces) == 0 { + b.produces = rootProduces + } + if len(b.consumes) == 0 { + b.consumes = rootConsumes + } +} + +// typeNameHandler sets the function that will convert types to strings in the parameter +// and model definitions. +func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder { + b.typeNameHandleFunc = handler + return b +} + +// Build creates a new Route using the specification details collected by the RouteBuilder +func (b *RouteBuilder) Build() Route { + pathExpr, err := newPathExpression(b.currentPath) + if err != nil { + log.Printf("Invalid path:%s because:%v", b.currentPath, err) + os.Exit(1) + } + if b.function == nil { + log.Printf("No function specified for route:" + b.currentPath) + os.Exit(1) + } + operationName := b.operation + if len(operationName) == 0 && b.function != nil { + // extract from definition + operationName = nameOfFunction(b.function) + } + route := Route{ + Method: b.httpMethod, + Path: concatPath(b.rootPath, b.currentPath), + Produces: b.produces, + Consumes: b.consumes, + Function: b.function, + Filters: b.filters, + If: b.conditions, + relativePath: b.currentPath, + pathExpr: pathExpr, + Doc: b.doc, + Notes: b.notes, + Operation: operationName, + ParameterDocs: b.parameters, + ResponseErrors: b.errorMap, + DefaultResponse: b.defaultResponse, + ReadSample: b.readSample, + WriteSample: b.writeSample, + Metadata: b.metadata, + Deprecated: b.deprecated, + contentEncodingEnabled: b.contentEncodingEnabled, + } + route.postBuild() + return route +} + +func concatPath(path1, path2 string) string { + return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") +} + +var anonymousFuncCount int32 + +// nameOfFunction returns the short name of the function f for documentation. +// It uses a runtime feature for debugging ; its value may change for later Go versions. 
+func nameOfFunction(f interface{}) string { + fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) + tokenized := strings.Split(fun.Name(), ".") + last := tokenized[len(tokenized)-1] + last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 + last = strings.TrimSuffix(last, ")-fm") // Go 1.5 + last = strings.TrimSuffix(last, "·fm") // < Go 1.5 + last = strings.TrimSuffix(last, "-fm") // Go 1.5 + if last == "func1" { // this could mean conflicts in API docs + val := atomic.AddInt32(&anonymousFuncCount, 1) + last = "func" + fmt.Sprintf("%d", val) + atomic.StoreInt32(&anonymousFuncCount, val) + } + return last +} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go new file mode 100644 index 00000000..19078af1 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/router.go @@ -0,0 +1,20 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import "net/http" + +// A RouteSelector finds the best matching Route given the input HTTP Request +// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the +// path parameters after the route has been selected. +type RouteSelector interface { + + // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. + // It returns a selected Route and its containing WebService or an error indicating + // a problem. + SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) +} diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go new file mode 100644 index 00000000..62d1108b --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/service_error.go @@ -0,0 +1,23 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import "fmt" + +// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. +type ServiceError struct { + Code int + Message string +} + +// NewError returns a ServiceError using the code and reason +func NewError(code int, message string) ServiceError { + return ServiceError{Code: code, Message: message} +} + +// Error returns a text representation of the service error +func (s ServiceError) Error() string { + return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) +} diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go new file mode 100644 index 00000000..77ba9a8c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -0,0 +1,290 @@ +package restful + +import ( + "errors" + "os" + "reflect" + "sync" + + "github.com/emicklei/go-restful/log" +) + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. 
+type WebService struct { + rootPath string + pathExpr *pathExpression // cached compilation of rootPath as RegExp + routes []Route + produces []string + consumes []string + pathParameters []*Parameter + filters []FilterFunction + documentation string + apiVersion string + + typeNameHandleFunc TypeNameHandleFunction + + dynamicRoutes bool + + // protects 'routes' if dynamic routes are enabled + routesLock sync.RWMutex +} + +func (w *WebService) SetDynamicRoutes(enable bool) { + w.dynamicRoutes = enable +} + +// TypeNameHandleFunction declares functions that can handle translating the name of a sample object +// into the restful documentation for the service. +type TypeNameHandleFunction func(sample interface{}) string + +// TypeNameHandler sets the function that will convert types to strings in the parameter +// and model definitions. If not set, the web service will invoke +// reflect.TypeOf(object).String(). +func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService { + w.typeNameHandleFunc = handler + return w +} + +// reflectTypeName is the default TypeNameHandleFunction and for a given object +// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via +// the reflection API. +func reflectTypeName(sample interface{}) string { + return reflect.TypeOf(sample).String() +} + +// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. +func (w *WebService) compilePathExpression() { + compiled, err := newPathExpression(w.rootPath) + if err != nil { + log.Printf("invalid path:%s because:%v", w.rootPath, err) + os.Exit(1) + } + w.pathExpr = compiled +} + +// ApiVersion sets the API version for documentation purposes. +func (w *WebService) ApiVersion(apiVersion string) *WebService { + w.apiVersion = apiVersion + return w +} + +// Version returns the API version for documentation purposes. +func (w *WebService) Version() string { return w.apiVersion } + +// Path specifies the root URL template path of the WebService. +// All Routes will be relative to this path. +func (w *WebService) Path(root string) *WebService { + w.rootPath = root + if len(w.rootPath) == 0 { + w.rootPath = "/" + } + w.compilePathExpression() + return w +} + +// Param adds a PathParameter to document parameters used in the root path. +func (w *WebService) Param(parameter *Parameter) *WebService { + if w.pathParameters == nil { + w.pathParameters = []*Parameter{} + } + w.pathParameters = append(w.pathParameters, parameter) + return w +} + +// PathParameter creates a new Parameter of kind Path for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) PathParameter(name, description string) *Parameter { + return PathParameter(name, description) +} + +// PathParameter creates a new Parameter of kind Path for documentation purposes. +// It is initialized as required with string as its DataType. +func PathParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} + p.bePath() + return p +} + +// QueryParameter creates a new Parameter of kind Query for documentation purposes. +// It is initialized as not required with string as its DataType. +func (w *WebService) QueryParameter(name, description string) *Parameter { + return QueryParameter(name, description) +} + +// QueryParameter creates a new Parameter of kind Query for documentation purposes. 
+// It is initialized as not required with string as its DataType. +func QueryParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} + p.beQuery() + return p +} + +// BodyParameter creates a new Parameter of kind Body for documentation purposes. +// It is initialized as required without a DataType. +func (w *WebService) BodyParameter(name, description string) *Parameter { + return BodyParameter(name, description) +} + +// BodyParameter creates a new Parameter of kind Body for documentation purposes. +// It is initialized as required without a DataType. +func BodyParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} + p.beBody() + return p +} + +// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. +// It is initialized as not required with string as its DataType. +func (w *WebService) HeaderParameter(name, description string) *Parameter { + return HeaderParameter(name, description) +} + +// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. +// It is initialized as not required with string as its DataType. +func HeaderParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beHeader() + return p +} + +// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) FormParameter(name, description string) *Parameter { + return FormParameter(name, description) +} + +// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. +// It is initialized as required with string as its DataType. +func FormParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beForm() + return p +} + +// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. +func (w *WebService) Route(builder *RouteBuilder) *WebService { + w.routesLock.Lock() + defer w.routesLock.Unlock() + builder.copyDefaults(w.produces, w.consumes) + w.routes = append(w.routes, builder.Build()) + return w +} + +// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' +func (w *WebService) RemoveRoute(path, method string) error { + if !w.dynamicRoutes { + return errors.New("dynamic routes are not enabled.") + } + w.routesLock.Lock() + defer w.routesLock.Unlock() + newRoutes := make([]Route, (len(w.routes) - 1)) + current := 0 + for ix := range w.routes { + if w.routes[ix].Method == method && w.routes[ix].Path == path { + continue + } + newRoutes[current] = w.routes[ix] + current = current + 1 + } + w.routes = newRoutes + return nil +} + +// Method creates a new RouteBuilder and initialize its http method +func (w *WebService) Method(httpMethod string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod) +} + +// Produces specifies that this WebService can produce one or more MIME types. +// Http requests must have one of these values set for the Accept header. 
+func (w *WebService) Produces(contentTypes ...string) *WebService { + w.produces = contentTypes + return w +} + +// Consumes specifies that this WebService can consume one or more MIME types. +// Http requests must have one of these values set for the Content-Type header. +func (w *WebService) Consumes(accepts ...string) *WebService { + w.consumes = accepts + return w +} + +// Routes returns the Routes associated with this WebService +func (w *WebService) Routes() []Route { + if !w.dynamicRoutes { + return w.routes + } + // Make a copy of the array to prevent concurrency problems + w.routesLock.RLock() + defer w.routesLock.RUnlock() + result := make([]Route, len(w.routes)) + for ix := range w.routes { + result[ix] = w.routes[ix] + } + return result +} + +// RootPath returns the RootPath associated with this WebService. Default "/" +func (w *WebService) RootPath() string { + return w.rootPath +} + +// PathParameters return the path parameter names for (shared among its Routes) +func (w *WebService) PathParameters() []*Parameter { + return w.pathParameters +} + +// Filter adds a filter function to the chain of filters applicable to all its Routes +func (w *WebService) Filter(filter FilterFunction) *WebService { + w.filters = append(w.filters, filter) + return w +} + +// Doc is used to set the documentation of this service. +func (w *WebService) Doc(plainText string) *WebService { + w.documentation = plainText + return w +} + +// Documentation returns it. +func (w *WebService) Documentation() string { + return w.documentation +} + +/* + Convenience methods +*/ + +// HEAD is a shortcut for .Method("HEAD").Path(subPath) +func (w *WebService) HEAD(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath) +} + +// GET is a shortcut for .Method("GET").Path(subPath) +func (w *WebService) GET(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath) +} + +// POST is a shortcut for .Method("POST").Path(subPath) +func (w *WebService) POST(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath) +} + +// PUT is a shortcut for .Method("PUT").Path(subPath) +func (w *WebService) PUT(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath) +} + +// PATCH is a shortcut for .Method("PATCH").Path(subPath) +func (w *WebService) PATCH(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath) +} + +// DELETE is a shortcut for .Method("DELETE").Path(subPath) +func (w *WebService) DELETE(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) +} diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go new file mode 100644 index 00000000..c9d31b06 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/web_service_container.go @@ -0,0 +1,39 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
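A short sketch of how the fluent `WebService`/`RouteBuilder` API defined above is typically wired together and registered. The `user` type and the `getUser`/`updateUser` handlers are hypothetical placeholders, not part of the vendored library:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// user, getUser and updateUser stand in for real resource types and handlers.
type user struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

func getUser(req *restful.Request, resp *restful.Response)    { /* elided */ }
func updateUser(req *restful.Request, resp *restful.Response) { /* elided */ }

// newUserService builds a WebService whose routes share the /users root path
// and the JSON content types set on the service.
func newUserService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	ws.Route(ws.GET("/{id}").To(getUser).
		Doc("get a single user").
		Param(ws.PathParameter("id", "identifier of the user")).
		Returns(http.StatusOK, "OK", user{}).
		Returns(http.StatusNotFound, "Not Found", nil))

	ws.Route(ws.PUT("/{id}").To(updateUser).
		Doc("update a user").
		Param(ws.PathParameter("id", "identifier of the user")).
		Reads(user{}))

	return ws
}

func main() {
	restful.Add(newUserService()) // registers on the DefaultContainer / http.DefaultServeMux
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```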
+ +import ( + "net/http" +) + +// DefaultContainer is a restful.Container that uses http.DefaultServeMux +var DefaultContainer *Container + +func init() { + DefaultContainer = NewContainer() + DefaultContainer.ServeMux = http.DefaultServeMux +} + +// If set the true then panics will not be caught to return HTTP 500. +// In that case, Route functions are responsible for handling any error situation. +// Default value is false = recover from panics. This has performance implications. +// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) +var DoNotRecover = false + +// Add registers a new WebService add it to the DefaultContainer. +func Add(service *WebService) { + DefaultContainer.Add(service) +} + +// Filter appends a container FilterFunction from the DefaultContainer. +// These are called before dispatching a http.Request to a WebService. +func Filter(filter FilterFunction) { + DefaultContainer.Filter(filter) +} + +// RegisteredWebServices returns the collections of WebServices from the DefaultContainer +func RegisteredWebServices() []*WebService { + return DefaultContainer.RegisteredWebServices() +} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml index 2092c72c..50e4afd1 100644 --- a/vendor/github.com/evanphx/json-patch/.travis.yml +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.8 - - 1.7 + - 1.14 + - 1.13 install: - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi @@ -11,6 +11,9 @@ install: script: - go get - go test -cover ./... + - cd ./v5 + - go get + - go test -cover ./... notifications: email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE index 0eb9b72d..df76d7d7 100644 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -6,7 +6,7 @@ modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Evan Phoenix nor the names of its contributors diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 9c7f87f7..121b039d 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -1,5 +1,5 @@ # JSON-Patch -`jsonpatch` is a library which provides functionallity for both applying +`jsonpatch` is a library which provides functionality for both applying [RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). 
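As the README above describes, the library can apply both RFC6902 operation lists and RFC7396 merge patches. A minimal sketch of the two entry points, using an illustrative original document modeled on the README example (the document contents are assumptions, not taken from this patch):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name":"John","age":28,"height":3.21}`)

	// RFC7396 merge patch: null removes a member, other values add or replace.
	mergePatch := []byte(`{"height":null,"name":"Jane"}`)
	merged, err := jsonpatch.MergePatch(original, mergePatch)
	if err != nil {
		panic(err)
	}
	fmt.Printf("merged:  %s\n", merged) // {"age":28,"name":"Jane"}

	// RFC6902 patch: an ordered list of operations.
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "replace", "path": "/name", "value": "Jane"},
		{"op": "remove", "path": "/height"}
	]`))
	if err != nil {
		panic(err)
	}
	patched, err := patch.Apply(original)
	if err != nil {
		panic(err)
	}
	fmt.Printf("patched: %s\n", patched)
}
```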
@@ -11,10 +11,11 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie **Latest and greatest**: ```bash -go get -u github.com/evanphx/json-patch +go get -u github.com/evanphx/json-patch/v5 ``` **Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` * Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` (previous versions below `v3` are unavailable) @@ -82,7 +83,7 @@ When ran, you get the following output: ```bash $ go run main.go patch document: {"height":null,"name":"Jane"} -updated tina doc: {"age":28,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} ``` ## Create and apply a JSON Patch @@ -164,7 +165,7 @@ func main() { } if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "similar"`) + fmt.Println(`"original" is _not_ structurally equal to "different"`) } } ``` @@ -173,7 +174,7 @@ When ran, you get the following output: ```bash $ go run main.go "original" is structurally equal to "similar" -"original" is _not_ structurally equal to "similar" +"original" is _not_ structurally equal to "different" ``` ## Combine merge patches diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go index 6806c4c2..14e8bb5c 100644 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -307,13 +307,16 @@ func matchesValue(av, bv interface{}) bool { return true case map[string]interface{}: bt := bv.(map[string]interface{}) - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } + if len(bt) != len(at) { + return false } for key := range bt { - if !matchesValue(at[key], bt[key]) { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { return false } } diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index c9cf5902..f185a45b 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -6,6 +6,8 @@ import ( "fmt" "strconv" "strings" + + "github.com/pkg/errors" ) const ( @@ -24,6 +26,14 @@ var ( AccumulatedCopySizeLimit int64 = 0 ) +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + type lazyNode struct { raw *json.RawMessage doc partialDoc @@ -31,10 +41,11 @@ type lazyNode struct { which int } -type operation map[string]*json.RawMessage +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage -// Patch is an ordered collection of operations. -type Patch []operation +// Patch is an ordered collection of Operations. 
+type Patch []Operation type partialDoc map[string]*lazyNode type partialArray []*lazyNode @@ -59,7 +70,7 @@ func (n *lazyNode) MarshalJSON() ([]byte, error) { case eAry: return json.Marshal(n.ary) default: - return nil, fmt.Errorf("Unknown type") + return nil, ErrUnknownType } } @@ -91,7 +102,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) { } if n.raw == nil { - return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document") + return nil, ErrInvalid } err := json.Unmarshal(*n.raw, &n.doc) @@ -110,7 +121,7 @@ func (n *lazyNode) intoAry() (*partialArray, error) { } if n.raw == nil { - return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array") + return nil, ErrInvalid } err := json.Unmarshal(*n.raw, &n.ary) @@ -191,6 +202,10 @@ func (n *lazyNode) equal(o *lazyNode) bool { return false } + if len(n.doc) != len(o.doc) { + return false + } + for k, v := range n.doc { ov, ok := o.doc[k] @@ -198,6 +213,10 @@ func (n *lazyNode) equal(o *lazyNode) bool { return false } + if (v == nil) != (ov == nil) { + return false + } + if v == nil && ov == nil { continue } @@ -227,7 +246,8 @@ func (n *lazyNode) equal(o *lazyNode) bool { return true } -func (o operation) kind() string { +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { if obj, ok := o["op"]; ok && obj != nil { var op string @@ -243,39 +263,41 @@ func (o operation) kind() string { return "unknown" } -func (o operation) path() string { +// Path reads the "path" field of the Operation. +func (o Operation) Path() (string, error) { if obj, ok := o["path"]; ok && obj != nil { var op string err := json.Unmarshal(*obj, &op) if err != nil { - return "unknown" + return "unknown", err } - return op + return op, nil } - return "unknown" + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") } -func (o operation) from() string { +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { if obj, ok := o["from"]; ok && obj != nil { var op string err := json.Unmarshal(*obj, &op) if err != nil { - return "unknown" + return "unknown", err } - return op + return op, nil } - return "unknown" + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") } -func (o operation) value() *lazyNode { +func (o Operation) value() *lazyNode { if obj, ok := o["value"]; ok { return newLazyNode(obj) } @@ -283,6 +305,23 @@ func (o operation) value() *lazyNode { return nil } +// ValueInterface decodes the operation value into an interface. 
+func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + func isArray(buf []byte) bool { Loop: for _, c := range buf { @@ -359,7 +398,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) { func (d *partialDoc) remove(key string) error { _, ok := (*d)[key] if !ok { - return fmt.Errorf("Unable to remove nonexistent key: %s", key) + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) } delete(*d, key) @@ -385,7 +424,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { idx, err := strconv.Atoi(key) if err != nil { - return err + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) } sz := len(*d) + 1 @@ -395,17 +434,17 @@ func (d *partialArray) add(key string, val *lazyNode) error { cur := *d if idx >= len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - if SupportNegativeIndices { - if idx < -len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - - if idx < 0 { - idx += len(ary) + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } + idx += len(ary) } copy(ary[0:idx], cur[0:idx]) @@ -424,7 +463,7 @@ func (d *partialArray) get(key string) (*lazyNode, error) { } if idx >= len(*d) { - return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } return (*d)[idx], nil @@ -439,17 +478,17 @@ func (d *partialArray) remove(key string) error { cur := *d if idx >= len(cur) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - if SupportNegativeIndices { - if idx < -len(cur) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - - if idx < 0 { - idx += len(cur) + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } + idx += len(cur) } ary := make([]*lazyNode, len(cur)-1) @@ -462,140 +501,189 @@ func (d *partialArray) remove(key string) error { } -func (p Patch) add(doc *container, op operation) error { - path := op.path() +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path) + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) } - return con.add(key, op.value()) + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil } -func (p Patch) remove(doc *container, op operation) error { - path := op.path() +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return 
errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path) + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) } - return con.remove(key) + return nil } -func (p Patch) replace(doc *container, op operation) error { - path := op.path() +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) } _, ok := con.get(key) if ok != nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) } - return con.set(key, op.value()) + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil } -func (p Patch) move(doc *container, op operation) error { - from := op.from() +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } con, key := findObject(doc, from) if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) } val, err := con.get(key) if err != nil { - return err + return errors.Wrapf(err, "error in move for path: '%s'", key) } err = con.remove(key) if err != nil { - return err + return errors.Wrapf(err, "error in move for path: '%s'", key) } - path := op.path() + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } con, key = findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) } - return con.add(key, val) + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil } -func (p Patch) test(doc *container, op operation) error { - path := op.path() +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) } val, err := con.get(key) - if err != nil { - return err + return errors.Wrapf(err, "error in test for path: '%s'", path) } if val == nil { if op.value().raw == nil { return nil } - return fmt.Errorf("Testing value %s failed", path) + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } else if op.value() == nil { - return fmt.Errorf("Testing 
value %s failed", path) + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } if val.equal(op.value()) { return nil } - return fmt.Errorf("Testing value %s failed", path) + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } -func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error { - from := op.from() +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } con, key := findObject(doc, from) if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) } val, err := con.get(key) if err != nil { - return err + return errors.Wrapf(err, "error in copy for from: '%s'", from) } - path := op.path() + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } con, key = findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) } valCopy, sz, err := deepCopy(val) if err != nil { - return err + return errors.Wrapf(err, "error while performing deep copy") } + (*accumulatedCopySize) += int64(sz) if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) } - return con.add(key, valCopy) + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil } // Equal indicates if 2 JSON documents have the same structural equality. 
@@ -651,7 +739,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { var accumulatedCopySize int64 for _, op := range p { - switch op.kind() { + switch op.Kind() { case "add": err = p.add(&pd, op) case "remove": @@ -665,7 +753,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { case "copy": err = p.copy(&pd, op, &accumulatedCopySize) default: - err = fmt.Errorf("Unexpected kind: %s", op.kind()) + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) } if err != nil { diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig new file mode 100644 index 00000000..3152da69 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore new file mode 100644 index 00000000..769c2440 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml new file mode 100644 index 00000000..9aef9184 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md new file mode 100644 index 00000000..813788af --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -0,0 +1,15 @@ +# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) +An implementation of JSON Pointer - Go language + +## Status +Completed YES + +Tested YES + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. 
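
As a rough usage sketch of the vendored jsonpointer package described in the README above (the New and Get signatures appear in pointer.go later in this patch; the document literal is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/go-openapi/jsonpointer"
    )

    func main() {
    	doc := map[string]interface{}{
    		"spec": map[string]interface{}{"replicas": 3},
    	}

    	// "/spec/replicas" is an RFC 6901 pointer; "~1" and "~0" escape "/" and "~".
    	ptr, err := jsonpointer.New("/spec/replicas")
    	if err != nil {
    		panic(err)
    	}

    	val, kind, err := ptr.Get(doc)
    	fmt.Println(val, kind, err) // expected: 3 int <nil>
    }
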
diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod new file mode 100644 index 00000000..3e45e225 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.mod @@ -0,0 +1,9 @@ +module github.com/go-openapi/jsonpointer + +require ( + github.com/go-openapi/swag v0.19.5 + github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect + github.com/stretchr/testify v1.3.0 +) + +go 1.13 diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum new file mode 100644 index 00000000..953d4f35 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.sum @@ -0,0 +1,24 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go new file mode 100644 index 00000000..b284eb77 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -0,0 +1,390 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. +// +// created 25-02-2013 + +package jsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +const ( + emptyPointer = `` + pointerSeparator = `/` + + invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator +) + +var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() +var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() + +// JSONPointable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONPointable interface { + JSONLookup(string) (interface{}, error) +} + +// JSONSetable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONSetable interface { + JSONSet(string, interface{}) error +} + +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + + var p Pointer + err := p.parse(jsonPointerString) + return p, err + +} + +// Pointer the json pointer reprsentation +type Pointer struct { + referenceTokens []string +} + +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + + var err error + + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.New(invalidStart) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } + } + } + + return err +} + +// Get uses the pointer to retrieve a value from a JSON document +func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + return p.get(document, swag.DefaultJSONNameProvider) +} + +// Set uses the pointer to set a value from a JSON document +func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { + return document, p.set(document, value, swag.DefaultJSONNameProvider) +} + +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) +} + +// SetForToken gets a value for a json pointer token 1 level deep +func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { + return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) +} + +func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + switch kind { + + case reflect.Struct: + if rValue.Type().Implements(jsonPointableType) { + r, err := 
node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + } + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if mv.IsValid() { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + switch rValue.Kind() { + + case reflect.Struct: + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node, kind = r, knd + + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { + knd := reflect.ValueOf(node).Kind() + + if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + } + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + // Full document when empty + if len(p.referenceTokens) == 0 { + return nil + } + + lastI := len(p.referenceTokens) - 1 + for i, 
token := range p.referenceTokens { + isLastToken := i == lastI + decodedToken := Unescape(token) + + if isLastToken { + + return setSingleImpl(node, data, decodedToken, nameProvider) + } + + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + switch kind { + + case reflect.Struct: + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = r + continue + } + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = fld.Interface() + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if !mv.IsValid() { + return fmt.Errorf("object has no key %q", decodedToken) + } + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { + node = mv.Addr().Interface() + continue + } + node = mv.Interface() + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + node = elem.Addr().Interface() + continue + } + node = elem.Interface() + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + + } + + return nil +} + +// DecodedTokens returns the decoded tokens +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// Pointer to string representation function +func (p *Pointer) String() string { + + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... 
and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) + step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + return step2 +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) + step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + return step2 +} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore new file mode 100644 index 00000000..769c2440 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml new file mode 100644 index 00000000..40b90757 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md new file mode 100644 index 00000000..66345f4c --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -0,0 +1,15 @@ +# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) +An implementation of JSON Reference - Go language + +## Status +Work in progress ( 90% done ) + +## Dependencies +https://github.com/go-openapi/jsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod new file mode 100644 index 00000000..35adddfe --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.mod @@ -0,0 +1,10 @@ +module github.com/go-openapi/jsonreference + +require ( + github.com/PuerkitoBio/purell v1.1.1 + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/go-openapi/jsonpointer v0.19.2 + github.com/stretchr/testify v1.3.0 + golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect + golang.org/x/text v0.3.2 // indirect +) diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum new file mode 100644 index 00000000..f1a7a34e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.sum @@ -0,0 +1,36 @@ +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson 
v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 00000000..3bc0a6e2 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,156 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/PuerkitoBio/purell" + "github.com/go-openapi/jsonpointer" +) + +const ( + fragmentRune = `#` +) + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + + var r Ref + err := r.parse(jsonReferenceString) + return r, err + +} + +// MustCreateRef parses the ref string and panics when it's invalid. 
+// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + return r +} + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, errors.New("child url is nil") + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 00000000..3152da69 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 00000000..dd91ed6a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 00000000..3e33f9f2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,23 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml new file mode 100644 index 00000000..aa26d876 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
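For reviewers unfamiliar with the go-openapi/jsonreference package vendored above, here is a minimal sketch of how its exported API (New, MustCreateRef, GetPointer, Inherits) is typically used. It is illustrative only, not part of the patch, and the example URLs and identifiers are assumptions.

package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// Illustrative example URL (an assumption, not taken from this repository).
	// New returns an error for invalid references; MustCreateRef panics instead.
	parent, err := jsonreference.New("https://example.com/openapi.json#/definitions/Pet")
	if err != nil {
		panic(err)
	}
	fmt.Println(parent.IsCanonical())         // true: full URL with scheme and host
	fmt.Println(parent.GetPointer().String()) // /definitions/Pet

	// Resolve a fragment-only child reference against the parent document.
	child := jsonreference.MustCreateRef("#/definitions/Error")
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String()) // https://example.com/openapi.json#/definitions/Error
}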
diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 00000000..6354742c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,10 @@ +# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +Currently supports Swagger 2.0. diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go new file mode 100644 index 00000000..d5ec7b90 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -0,0 +1,297 @@ +// Code generated by go-bindata. DO NOT EDIT. 
+// sources: +// schemas/jsonschema-draft-04.json (4.357kB) +// schemas/v2/schema.json (40.249kB) + +package spec + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x
77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return bindataRead( + _jsonschemaDraft04JSON, + "jsonschema-draft-04.json", + ) +} + +func jsonschemaDraft04JSON() (*asset, error) { + bytes, err := jsonschemaDraft04JSONBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} + return a, nil +} + +var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x
5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\
xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04
\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa
6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") + +func v2SchemaJSONBytes() ([]byte, error) { + return bindataRead( + _v2SchemaJSON, + "v2/schema.json", + ) +} + +func v2SchemaJSON() (*asset, error) { + bytes, err := v2SchemaJSONBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "jsonschema-draft-04.json": jsonschemaDraft04JSON, + + "v2/schema.json": v2SchemaJSON, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, + "v2": &bintree{nil, map[string]*bintree{ + "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. 
+func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 00000000..3fada0da --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,60 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import "sync" + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + debugLog("getting %q from resolution cache", uri) + s.lock.RLock() + v, ok := s.store[uri] + debugLog("got %q from resolution cache: %t", uri, ok) + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var resCache ResolutionCache + +func init() { + resCache = initResolutionCache() +} + +// initResolutionCache initializes the URI resolution cache +func initResolutionCache() ResolutionCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 00000000..f285970a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ContactInfo contact information for the exposed API. +// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 00000000..389c528f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,47 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + // Debug is true when the SWAGGER_DEBUG env var is not empty. + // It enables a more verbose logging of this package. + Debug = os.Getenv("SWAGGER_DEBUG") != "" + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 00000000..1e7fc8c4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,650 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ExpandOptions provides options for spec expand +type ExpandOptions struct { + RelativeBase string + SkipSchemas bool + ContinueOnError bool + AbsoluteCircularRef bool +} + +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return nil, err + } + specBasePath := "" + if opts != nil && opts.RelativeBase != "" { + specBasePath, _ = absPath(opts.RelativeBase) + } + + result := new(Schema) + if err := resolver.Resolve(ref, result, specBasePath); err != nil { + return nil, err + } + return result, nil +} + +// ResolveRef resolves a reference against a context root +// ref is guaranteed to be in root (no need to go to external files) +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + panic(err) + } + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]interface{}: + b, _ := json.Marshal(sch) + newSch := new(Schema) + _ = json.Unmarshal(b, newSch) + return newSch, nil + default: + return nil, fmt.Errorf("unknown type for the resolved reference") + } +} + +// ResolveParameter resolves a parameter reference against a context root +func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { + return ResolveParameterWithBase(root, ref, nil) +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) { + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return nil, err + } + + result := new(Parameter) + if err := resolver.Resolve(&ref, result, ""); err != nil { + return nil, err + } + return result, nil +} + +// ResolveResponse resolves response a reference against a context root +func ResolveResponse(root interface{}, ref Ref) (*Response, error) { + return ResolveResponseWithBase(root, ref, nil) +} + +// ResolveResponseWithBase resolves response a reference against a context root and base path +func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) { + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return nil, err + } + + result := new(Response) + if err := resolver.Resolve(&ref, result, ""); err != nil { + return nil, err + } + return result, nil +} + +// ResolveItems resolves parameter items reference against a context root and base path. +// +// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. +// Similarly, $ref are forbidden in response headers. 
+func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return nil, err + } + basePath := "" + if opts.RelativeBase != "" { + basePath = opts.RelativeBase + } + result := new(Items) + if err := resolver.Resolve(&ref, result, basePath); err != nil { + return nil, err + } + return result, nil +} + +// ResolvePathItem resolves response a path item against a context root and base path +func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) { + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return nil, err + } + basePath := "" + if opts.RelativeBase != "" { + basePath = opts.RelativeBase + } + result := new(PathItem) + if err := resolver.Resolve(&ref, result, basePath); err != nil { + return nil, err + } + return result, nil +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + resolver, err := defaultSchemaLoader(spec, options, nil, nil) + // Just in case this ever returns an error. + if resolver.shouldStopOnError(err) { + return err + } + + // getting the base path of the spec to adjust all subsequent reference resolutions + specBasePath := "" + if options != nil && options.RelativeBase != "" { + specBasePath, _ = absPath(options.RelativeBase) + } + + if options == nil || !options.SkipSchemas { + for key, definition := range spec.Definitions { + var def *Schema + var err error + if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + if def != nil { + spec.Definitions[key] = *def + } + } + } + + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Parameters[key] = parameter + } + + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key := range spec.Paths.Paths { + path := spec.Paths.Paths[key] + if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Paths.Paths[key] = path + } + } + + return nil +} + +// baseForRoot loads in the cache the root document and produces a fake "root" base path entry +// for further $ref resolution +func baseForRoot(root interface{}, cache ResolutionCache) string { + // cache the root document to resolve $ref's + const rootBase = "root" + if root != nil { + base, _ := absPath(rootBase) + normalizedBase := normalizeAbsPath(base) + debugLog("setting root doc in cache at: %s", normalizedBase) + if cache == nil { + cache = resCache + } + cache.Set(normalizedBase, root) + return rootBase + } + return "" +} + +// ExpandSchema expands the refs in the schema object with reference to the root object +// go-openapi/validate uses this function +// notice that it is impossible to reference a json schema in a different file other than root +func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + 
ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + var basePath string + if opts.RelativeBase != "" { + basePath, _ = absPath(opts.RelativeBase) + } + + resolver, err := defaultSchemaLoader(nil, opts, cache, nil) + if err != nil { + return err + } + + refs := []string{""} + var s *Schema + if s, err = expandSchema(*schema, refs, resolver, basePath); err != nil { + return err + } + *schema = *s + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items != nil { + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + } + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + // normalizing is important + newRef := normalizeFileRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + // important: ID can be relative path + if target.ID != "" { + debugLog("schema has ID: %s", target.ID) + // handling the case when id is a folder + // remember that basePath has to be a file + refPath := target.ID + if strings.HasSuffix(target.ID, "/") { + // path.Clean here would not work correctly if basepath is http + refPath = fmt.Sprintf("%s%s", refPath, "placeholder.json") + } + basePath = normalizePaths(refPath, basePath) + } + + var t *Schema + // if Ref is found, everything else doesn't matter + // Ref also changes the resolution scope of children expandSchema + if target.Ref.String() != "" { + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeFileRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. We leave them as ref. 
+ // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath) + } else { + target.Ref = *normalizedRef + } + return &target, nil + } + + debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target) + if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) { + return nil, err + } + + if t != nil { + parentRefs = append(parentRefs, normalizedRef.String()) + var err error + transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref) + if transitiveResolver.shouldStopOnError(err) { + return nil, err + } + + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + target.AllOf[i] = *t + } + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + target.AnyOf[i] = *t + } + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Not = *t + } + } + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + for k := range target.Definitions { + t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if 
resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Definitions[k] = *t + } + } + return &target, nil +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { + if pathItem == nil { + return nil + } + + parentRefs := []string{} + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + if pathItem.Ref.String() != "" { + var err error + resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref) + if resolver.shouldStopOnError(err) { + return err + } + } + pathItem.Ref = Ref{} + + for idx := range pathItem.Parameters { + if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, + } + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { + if op == nil { + return nil + } + + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + op.Parameters[i] = param + } + + if op.Responses != nil { + responses := op.Responses + if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + responses.StatusCodeResponses[code] = response + } + } + return nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// This is the exported version of expandResponse +// all refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } + opts := &ExpandOptions{ + RelativeBase: specBasePath, + } + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + 
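+	// Root-based expansion resolves $ref against an in-memory document rather
+	// than a fetchable file. A hedged sketch (the "/pets" path and the index
+	// are illustrative):
+	//
+	//	param := root.Paths.Paths["/pets"].Get.Parameters[0]
+	//	if err := ExpandParameterWithRoot(&param, root, nil); err != nil {
+	//		// handle the expansion failure
+	//	}
+	//
+	// ExpandParameter and ExpandResponse are the path-based counterparts and
+	// take a base path string instead of a root document.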
resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } + opts := &ExpandOptions{ + RelativeBase: specBasePath, + } + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ref *Ref + var sch *Schema + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("expand: unsupported type %T. Input should be of type *Parameter or *Response", input) + } + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + if ref == nil { + return nil + } + parentRefs := []string{} + if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver, err := resolver.transitiveResolver(basePath, *ref) + if transitiveResolver.shouldStopOnError(err) { + return err + } + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch != nil && sch.Ref.String() != "" { + // schema expanded to a $ref in another root + var ern error + sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI())) + if ern != nil { + return ern + } + } + if ref != nil { + *ref = Ref{} + } + + if !resolver.options.SkipSchemas && sch != nil { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + *sch = *s + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 00000000..88add91b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. 
+// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod new file mode 100644 index 00000000..42073be0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.mod @@ -0,0 +1,14 @@ +module github.com/go-openapi/spec + +require ( + github.com/go-openapi/jsonpointer v0.19.2 + github.com/go-openapi/jsonreference v0.19.2 + github.com/go-openapi/swag v0.19.2 + github.com/kr/pty v1.1.5 // indirect + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 // indirect + golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect + golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum new file mode 100644 index 00000000..73e97a2d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.sum @@ -0,0 +1,66 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:nH6xp8XdXHx8dqveo0ZuJBluCO2qGrPbDNZ0dwoRHP0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= +github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi880= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 00000000..39efe452 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,197 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values 
...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) + return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 00000000..c458b49b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,165 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Extensions vendor specific extensions +type Extensions map[string]interface{} + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value interface{}) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetBool gets a string value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string value from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, isSlice := v.([]interface{}) + if !isSlice { + return nil, false + } + var strs []string + for _, iface := range arr { + str, isString := iface.(string) + if !isString { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. +type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value interface{}) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]interface{}) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]interface{}) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]interface{}{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. 
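+// A hedged sketch of building an Info value with a vendor extension (the field
+// values and the "x-audience" key are illustrative):
+//
+//	info := Info{InfoProps: InfoProps{Title: "Pet Store API", Version: "1.0.0"}}
+//	info.AddExtension("x-audience", "internal")
+//	if v, ok := info.Extensions.GetString("x-audience"); ok {
+//		_ = v // extension keys are lower-cased when added
+//	}
+//
+// Only keys starting with "x-" survive VendorExtensible.MarshalJSON.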
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 00000000..365d1631 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,244 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". 
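+// A hedged sketch of the fluent Items builders defined below (the type,
+// format, and bounds are illustrative):
+//
+//	tag := NewItems().Typed("string", "").WithMaxLength(64)
+//	csv := NewItems().CollectionOf(tag, "csv").WithMinItems(1)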
+// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. +func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) 
+ return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 00000000..f20961b4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// License information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 00000000..b8957e7c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// normalize absolute path for cache. +// on Windows, drive letters should be converted to lower as scheme in net/url.URL +func normalizeAbsPath(path string) string { + u, err := url.Parse(path) + if err != nil { + debugLog("normalize absolute path failed: %s", err) + return path + } + return u.String() +} + +// base or refPath could be a file path or a URL +// given a base absolute path and a ref path, return the absolute path of refPath +// 1) if refPath is absolute, return it +// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if exists +// base could be a directory or a full file path +func normalizePaths(refPath, base string) string { + refURL, _ := url.Parse(refPath) + if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { + // refPath is actually absolute + if refURL.Host != "" { + return refPath + } + parts := strings.Split(refPath, "#") + result := filepath.FromSlash(parts[0]) + if len(parts) == 2 { + result += "#" + parts[1] + } + return result + } + + // relative refPath + baseURL, _ := url.Parse(base) + if !strings.HasPrefix(refPath, "#") { + // combining paths + if baseURL.Host != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } else { // base is a file + newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) + return newBase + } + + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + return baseURL.String() +} + +// denormalizePaths returns to simplest notation on file $ref, +// i.e. strips the absolute path and sets a path relative to the base path. +// +// This is currently used when we rewrite ref after a circular ref has been detected +func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { + debugLog("denormalizeFileRef for: %s", ref.String()) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + return ref + } + // strip relativeBase from URI + relativeBaseURL, _ := url.Parse(relativeBase) + relativeBaseURL.Fragment = "" + + if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { + // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix + r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) + return &r + } + + if relativeBaseURL.IsAbs() { + // other absolute URL get unchanged (i.e. 
with a non-empty scheme) + return ref + } + + // for relative file URIs: + originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) + originalRelativeBaseURL.Fragment = "" + if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { + // the resulting ref is in the expanded spec: return a local ref + r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) + return &r + } + + // check if we may set a relative path, considering the original base path for this spec. + // Example: + // spec is located at /mypath/spec.json + // my normalized ref points to: /mypath/item.json#/target + // expected result: item.json#/target + parts := strings.Split(ref.String(), "#") + relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) + if err != nil { + // there is no common ancestor (e.g. different drives on windows) + // leaves the ref unchanged + return ref + } + if len(parts) == 2 { + relativePath += "#" + parts[1] + } + r, _ := NewRef(relativePath) + return &r +} + +// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL +func normalizeFileRef(ref *Ref, relativeBase string) *Ref { + // This is important for when the reference is pointing to the root schema + if ref.String() == "" { + r, _ := NewRef(relativeBase) + return &r + } + + debugLog("normalizing %s against %s", ref.String(), relativeBase) + + s := normalizePaths(ref.String(), relativeBase) + r, _ := NewRef(s) + return &r +} + +// absPath returns the absolute path of a file +func absPath(fname string) (string, error) { + if strings.HasPrefix(fname, "http") { + return fname, nil + } + if filepath.IsAbs(fname) { + return fname, nil + } + wd, err := os.Getwd() + return filepath.Join(wd, fname), err +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 00000000..b1ebd599 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,398 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
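+// A hedged sketch of the fluent Operation builders defined in this file (the
+// operation id, tag, media type, and parameter are illustrative):
+//
+//	op := NewOperation("listPets").WithTags("pets").WithProduces("application/json")
+//	op.AddParam(QueryParam("limit").Typed("integer", "int32"))
+//	op.RespondsWith(200, new(Response))
+//
+// RespondsWith(0, resp) sets the default response rather than a coded one.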
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +func init() { + //gob.Register(map[string][]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marhaller here to handle a special cases related to +// the Security field. We need to preserve zero length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance. 
+// It expects an ID as parameter but not passing an ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprected +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation, when a parameter for that location +// and with that name already exists it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := append(o.Parameters[:i], *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. 
+// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) 
+ } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 00000000..cecdff54 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,321 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, + SimpleSchema: SimpleSchema{Type: "object"}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. 
+// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
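+// A hedged illustration using the constructors defined below; the names and
+// types are illustrative, and RefSchema is assumed from schema.go in this package:
+//
+//	id := PathParam("itemId").Typed("integer", "int64")
+//	limit := QueryParam("limit").Typed("integer", "int32").WithDefault(20)
+//	item := BodyParam("item", RefSchema("#/definitions/Item"))
+//	avatar := FileParam("avatar")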
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets a the enum values (replace) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 00000000..68fc8e90 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 00000000..9dc82a29 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 00000000..08ff869b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,191 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshalss the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return r.String() + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI(basepaths ...string) bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + rr, err := http.Get(v) + if err != nil { + return false + } + + return rr.StatusCode/100 == 2 + } + + if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(filepath.ToSlash(pth)) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go new file mode 100644 index 00000000..27729c1d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/response.go @@ -0,0 +1,131 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation. 
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. +// Passing a nil argument removes the schema from this response +func (r *Response) WithSchema(schema *Schema) *Response { + r.Schema = schema + return r +} + +// AddHeader adds a header to this response +func (r *Response) AddHeader(name string, header *Header) *Response { + if header == nil { + return r.RemoveHeader(name) + } + if r.Headers == nil { + r.Headers = make(map[string]Header) + } + r.Headers[name] = *header + return r +} + +// RemoveHeader removes a header from this response +func (r *Response) RemoveHeader(name string) *Response { + delete(r.Headers, name) + return r +} + +// AddExample adds an example to this response +func (r *Response) AddExample(mediaType string, example interface{}) *Response { + if r.Examples == nil { + r.Examples = make(map[string]interface{}) + } + r.Examples[mediaType] = example + return r +} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go new file mode 100644 index 00000000..4efb6f86 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -0,0 +1,127 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/go-openapi/swag" +) + +// Responses is a container for the expected responses of an operation. +// The container maps a HTTP response code to the expected response. +// It is not expected from the documentation to necessarily cover all possible HTTP response codes, +// since they may not be known in advance. However, it is expected from the documentation to cover +// a successful operation response and any known errors. +// +// The `default` can be used a default response object for all HTTP codes that are not covered +// individually by the specification. +// +// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response +// for a successful operation call. +// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (interface{}, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return scr, nil + } + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// ResponsesProps describes all responses for an operation. +// It tells what is the default response and maps all responses with a +// HTTP status code. 
+type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]Response + if err := json.Unmarshal(data, &res); err != nil { + return nil + } + if v, ok := res["default"]; ok { + r.Default = &v + delete(res, "default") + } + for k, v := range res { + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = v + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 00000000..37858ece --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,596 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func 
StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := url.Parse(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` 
+ AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. +// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds 
a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) + return s +} + +// WithMaxLength sets a max length value +func (s *Schema) WithMaxLength(max int64) *Schema { + s.MaxLength = &max + return s +} + +// WithMinLength sets a min length value +func (s *Schema) WithMinLength(min int64) *Schema { + s.MinLength = &min + return s +} + +// WithPattern sets a pattern value +func (s *Schema) WithPattern(pattern string) *Schema { + s.Pattern = pattern + return s +} + +// WithMultipleOf sets a multiple of value +func (s *Schema) WithMultipleOf(number float64) *Schema { + s.MultipleOf = &number + return s +} + +// WithMaximum sets a maximum number value +func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { + s.Maximum = &max + s.ExclusiveMaximum = exclusive + return s +} + +// WithMinimum sets a minimum number value +func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { + s.Minimum = &min + s.ExclusiveMinimum = exclusive + return s +} + +// WithEnum sets a the enum values (replace) +func (s *Schema) WithEnum(values ...interface{}) *Schema { + s.Enum = append([]interface{}{}, values...) + return s +} + +// WithMaxItems sets the max items +func (s *Schema) WithMaxItems(size int64) *Schema { + s.MaxItems = &size + return s +} + +// WithMinItems sets the min items +func (s *Schema) WithMinItems(size int64) *Schema { + s.MinItems = &size + return s +} + +// UniqueValues dictates that this array can only have unique items +func (s *Schema) UniqueValues() *Schema { + s.UniqueItems = true + return s +} + +// AllowDuplicates this array can have duplicates +func (s *Schema) AllowDuplicates() *Schema { + s.UniqueItems = false + return s +} + +// AddToAllOf adds a schema to the allOf property +func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { + s.AllOf = append(s.AllOf, schemas...) + return s +} + +// WithDiscriminator sets the name of the discriminator field +func (s *Schema) WithDiscriminator(discriminator string) *Schema { + s.Discriminator = discriminator + return s +} + +// AsReadOnly flags this schema as readonly +func (s *Schema) AsReadOnly() *Schema { + s.ReadOnly = true + return s +} + +// AsWritable flags this schema as writeable (not read-only) +func (s *Schema) AsWritable() *Schema { + s.ReadOnly = false + return s +} + +// WithExample sets the example for this schema +func (s *Schema) WithExample(example interface{}) *Schema { + s.Example = example + return s +} + +// WithExternalDocs sets/removes the external docs for/from this schema. +// When you pass empty strings as params the external documents will be removed. 
+// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { + return err + } + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + 
sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 00000000..c34a96fa --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader function to use when loading remote refs +var PathLoader func(string) (json.RawMessage, error) + +func init() { + PathLoader = func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, which allows shortcuts. + // NOTE: this is not just a performance improvement: it is required to figure out + // circular references which participate several cycles. + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. 
+ circulars map[string]bool + basePath string +} + +func newResolverContext(originalBasePath string) *resolverContext { + return &resolverContext{ + circulars: make(map[string]bool), + basePath: originalBasePath, // keep the root base path in context + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext + loadDoc func(string) (json.RawMessage, error) +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) { + if ref.IsRoot() || ref.HasFragmentOnly { + return r, nil + } + + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r, nil + } + + // Set a new root to resolve against + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + debugLog("setting new root: %s", newOptions.RelativeBase) + resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context) + if err != nil { + return nil, err + } + + return resolver, nil +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + debugLog("got a new resolver") + if transitive.options != nil && transitive.options.RelativeBase != "" { + basePath, _ = absPath(transitive.options.RelativeBase) + debugLog("new basePath = %s", basePath) + } + } + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return fmt.Errorf("resolve ref: target needs to be a pointer") + } + + refURL := ref.GetURL() + if refURL == nil { + return nil + } + + var res interface{} + var data interface{} + var err error + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeFileRef(ref, basePath) + debugLog("current ref is: %s", ref.String()) + debugLog("current ref normalized file: %s", baseRef.String()) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + normalized := normalizeAbsPath(toFetch.String()) + + data, fromCache := r.cache.Get(normalized) + if !fromCache { + b, err := r.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + if err := json.Unmarshal(b, &data); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, data) + } + + return data, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// It relies on a private context (which needs not be locked). 
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizePaths(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target +// Resolve is not in charge of following references, it only resolves ref by following its URL +// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them +// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("deref: unsupported type %T", input) + } + + curRef := ref.String() + if curRef != "" { + normalizedRef := normalizeFileRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) { + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + // NOTE(fredbi): removed basePath check => needs more testing + if ref.String() != "" && ref.String() != curRef { + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) + } + } + + return nil +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) (*schemaLoader, error) { + + if cache == nil { + cache = resCache + } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + absBase, _ := absPath(expandOptions.RelativeBase) + if context == nil { + context = newResolverContext(absBase) + } + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + loadDoc: func(path string) (json.RawMessage, error) { + debugLog("fetching document at %q", path) + return PathLoader(path) + }, + }, nil +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 00000000..fe353842 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,140 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SecuritySchemeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 00000000..0bb045bc --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,86 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import "encoding/json" + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
+//go:generate perl -pi -e s,Json,JSON,g bindata.go + +const ( + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" + // JSONSchemaURL the url for the json schema schema + JSONSchemaURL = "http://json-schema.org/draft-04/schema#" +) + +var ( + jsonSchema *Schema + swaggerSchema *Schema +) + +func init() { + jsonSchema = MustLoadJSONSchemaDraft04() + swaggerSchema = MustLoadSwagger20Schema() +} + +// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error +func MustLoadJSONSchemaDraft04() *Schema { + d, e := JSONSchemaDraft04() + if e != nil { + panic(e) + } + return d +} + +// JSONSchemaDraft04 loads the json schema document for json shema draft04 +func JSONSchemaDraft04() (*Schema, error) { + b, err := Asset("jsonschema-draft-04.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} + +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +func MustLoadSwagger20Schema() *Schema { + d, e := Swagger20Schema() + if e != nil { + panic(e) + } + return d +} + +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +func Swagger20Schema() (*Schema, error) { + + b, err := Asset("v2/schema.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go new file mode 100644 index 00000000..44722ffd --- /dev/null +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -0,0 +1,448 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Swagger this is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. 
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
+ return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) >= 4 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + 
} + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 00000000..faa3d3de --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go new file mode 100644 index 00000000..aa12b56f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/unused.go @@ -0,0 +1,174 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +/* + +import ( + "net/url" + "os" + "path" + "path/filepath" + + "github.com/go-openapi/jsonpointer" +) + + // Some currently unused functions and definitions that + // used to be part of the expander. 
+ + // Moved here for the record and possible future reuse + +var ( + idPtr, _ = jsonpointer.New("/id") + refPtr, _ = jsonpointer.New("/$ref") +) + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } + nw, err := ret.Inherits(rf) + if err != nil { + break + } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + nwURL.Path = filepath.Join(".", nwpt) + } + } + } + + ret = nw + } + + } + + return ret +} + +// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID +func basePathFromSchemaID(oldBasePath, id string) string { + u, err := url.Parse(oldBasePath) + if err != nil { + panic(err) + } + uid, err := url.Parse(id) + if err != nil { + panic(err) + } + + if path.IsAbs(uid.Path) { + return id + } + u.Path = path.Join(path.Dir(u.Path), uid.Path) + return u.String() +} +*/ + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 00000000..945a4670 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. +// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig new file mode 100644 index 00000000..3152da69 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore new file mode 100644 index 00000000..d69b53ac --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +vendor +Godeps +.idea diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml new file mode 100644 index 00000000..625c3d6a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -0,0 +1,22 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 25 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 3 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/swag/.travis.yml 
b/vendor/github.com/go-openapi/swag/.travis.yml new file mode 100644 index 00000000..aa26d876 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..9322b065 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
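
For context, a minimal usage sketch of the go-openapi/swag helpers vendored below (illustrative only — not part of the vendored sources or of this patch; it relies solely on functions defined in the files that follow, namely String/StringValue, ConvertBool, ConvertInt32, and ConcatJSON):

// Illustrative sketch, not part of the vendored go-openapi/swag sources.
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Pointer <-> value helpers (convert_types.go), useful for optional fields.
	name := swag.String("frameworkcontroller")
	fmt.Println(swag.StringValue(name), swag.StringValue(nil)) // "frameworkcontroller" and ""

	// String -> builtin conversions (convert.go), wrapping strconv.
	ok, _ := swag.ConvertBool("yes") // true: "yes" is in the accepted set
	n, err := swag.ConvertInt32("42")
	fmt.Println(ok, n, err)

	// Fast JSON concatenation (json.go), the same mechanism the spec package's
	// MarshalJSON methods use to merge two marshaled struct halves into one object.
	merged := swag.ConcatJSON([]byte(`{"a":1}`), []byte(`{"b":2}`))
	fmt.Println(string(merged)) // {"a":1,"b":2}
}
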
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md new file mode 100644 index 00000000..eb60ae80 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/README.md @@ -0,0 +1,22 @@ +# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) + +Contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + +* convert between value and pointers for builtin types +* convert from string to builtin types (wraps strconv) +* fast json concatenation +* search in path +* load from file or http +* name mangling + + +This repo has only few dependencies outside of the standard library: + +* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go new file mode 100644 index 00000000..7da35c31 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -0,0 +1,208 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 +) + +// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive +func IsFloat64AJSONInteger(f float64) bool { + if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { + return false + } + fa := math.Abs(f) + g := float64(uint64(f)) + ga := math.Abs(g) + + diff := math.Abs(f - g) + + // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases + switch { + case f == g: // best case + return true + case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case + return true + case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values + return diff < (epsilon * math.SmallestNonzeroFloat64) + } + // check the relative error + return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon +} + +var evaluatesAsTrue map[string]struct{} + +func init() { + evaluatesAsTrue = map[string]struct{}{ + "true": {}, + "1": {}, + "yes": {}, + "ok": {}, + "y": {}, + "on": {}, + "selected": {}, + "checked": {}, + "t": {}, + "enabled": {}, + } +} + +// ConvertBool turn a string into a boolean +func ConvertBool(str string) (bool, error) { + _, ok := evaluatesAsTrue[strings.ToLower(str)] + return ok, nil +} + +// ConvertFloat32 turn a string into a float32 +func ConvertFloat32(str string) (float32, error) { + f, err := strconv.ParseFloat(str, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// ConvertFloat64 turn a string into a float64 +func ConvertFloat64(str string) (float64, error) { + return strconv.ParseFloat(str, 64) +} + +// ConvertInt8 turn a string into int8 boolean +func ConvertInt8(str string) (int8, error) { + i, err := strconv.ParseInt(str, 10, 8) + if err != nil { + return 0, err + } + return int8(i), nil +} + +// ConvertInt16 turn a string into a int16 +func ConvertInt16(str string) (int16, error) { + i, err := strconv.ParseInt(str, 10, 16) + if err != nil { + return 0, err + } + return int16(i), nil +} + +// ConvertInt32 turn a string into a int32 +func ConvertInt32(str string) (int32, error) { + i, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// ConvertInt64 turn a string into a int64 +func ConvertInt64(str string) (int64, error) { + return strconv.ParseInt(str, 10, 64) +} + +// ConvertUint8 turn a string into a uint8 +func ConvertUint8(str string) (uint8, error) { + i, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return 0, err + } + return uint8(i), nil +} + +// ConvertUint16 turn a string into a uint16 +func ConvertUint16(str string) (uint16, error) { + i, err := strconv.ParseUint(str, 10, 16) + if err != nil { + return 0, err + } + return uint16(i), nil +} + +// ConvertUint32 turn a string into a uint32 +func ConvertUint32(str string) (uint32, error) { + i, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// ConvertUint64 turn a string into a uint64 +func ConvertUint64(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} + +// FormatBool turns a boolean into a string +func FormatBool(value bool) string { + return strconv.FormatBool(value) +} + +// FormatFloat32 turns a float32 into a string +func FormatFloat32(value float32) string 
{ + return strconv.FormatFloat(float64(value), 'f', -1, 32) +} + +// FormatFloat64 turns a float64 into a string +func FormatFloat64(value float64) string { + return strconv.FormatFloat(value, 'f', -1, 64) +} + +// FormatInt8 turns an int8 into a string +func FormatInt8(value int8) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt16 turns an int16 into a string +func FormatInt16(value int16) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt32 turns an int32 into a string +func FormatInt32(value int32) string { + return strconv.Itoa(int(value)) +} + +// FormatInt64 turns an int64 into a string +func FormatInt64(value int64) string { + return strconv.FormatInt(value, 10) +} + +// FormatUint8 turns an uint8 into a string +func FormatUint8(value uint8) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint16 turns an uint16 into a string +func FormatUint16(value uint16) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint32 turns an uint32 into a string +func FormatUint32(value uint32) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint64 turns an uint64 into a string +func FormatUint64(value uint64) string { + return strconv.FormatUint(value, 10) +} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go new file mode 100644 index 00000000..c95e4e78 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert_types.go @@ -0,0 +1,595 @@ +package swag + +import "time" + +// This file was taken from the aws go sdk + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to of the int64 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int64 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pouinter to of the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pouinter passed in or +// 0 if the pouinter is nil. 
+func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pouinters +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pouinters uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pouinters +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pouinters uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pouinter to of the uint64 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint64 pouinter passed in or +// 0 if the pouinter is nil. +func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint64 values uinto a slice of +// uint32 pouinters +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pouinters uinto a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values uinto a string +// map of uint32 pouinters +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pouinters uinto a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pouinter to of the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pouinter passed in or +// 0 if the pouinter is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values uinto a slice of +// uint64 pouinters +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pouinters uinto a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values uinto a string +// map of uint64 pouinters +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pouinters uinto a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go new file mode 100644 index 00000000..8d2c8c50 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -0,0 +1,32 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. 
+ + * convert between value and pointers for builtin types + * convert from string to builtin types (wraps strconv) + * fast json concatenation + * search in path + * load from file or http + * name mangling + + +This repo has only few dependencies outside of the standard library: + + * YAML utilities depend on gopkg.in/yaml.v2 +*/ +package swag diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod new file mode 100644 index 00000000..15bbb082 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.mod @@ -0,0 +1,14 @@ +module github.com/go-openapi/swag + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 + github.com/stretchr/testify v1.3.0 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/yaml.v2 v2.2.2 +) + +replace github.com/golang/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422 + +replace sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1 diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum new file mode 100644 index 00000000..33469f54 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.sum @@ -0,0 +1,20 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go new file mode 100644 index 00000000..edf93d84 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/json.go @@ -0,0 +1,312 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "bytes" + "encoding/json" + "log" + "reflect" + "strings" + "sync" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + +// DefaultJSONNameProvider the default cache for types +var DefaultJSONNameProvider = NewNameProvider() + +const comma = byte(',') + +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': '}', + '[': ']', + } +} + +type ejMarshaler interface { + MarshalEasyJSON(w *jwriter.Writer) +} + +type ejUnmarshaler interface { + UnmarshalEasyJSON(w *jlexer.Lexer) +} + +// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller +// so it takes the fastest option available. +func WriteJSON(data interface{}) ([]byte, error) { + if d, ok := data.(ejMarshaler); ok { + jw := new(jwriter.Writer) + d.MarshalEasyJSON(jw) + return jw.BuildBytes() + } + if d, ok := data.(json.Marshaler); ok { + return d.MarshalJSON() + } + return json.Marshal(data) +} + +// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller +// so it takes the fastes option available +func ReadJSON(data []byte, value interface{}) error { + trimmedData := bytes.Trim(data, "\x00") + if d, ok := value.(ejUnmarshaler); ok { + jl := &jlexer.Lexer{Data: trimmedData} + d.UnmarshalEasyJSON(jl) + return jl.Error() + } + if d, ok := value.(json.Unmarshaler); ok { + return d.UnmarshalJSON(trimmedData) + } + return json.Unmarshal(trimmedData, value) +} + +// DynamicJSONToStruct converts an untyped json structure into a struct +func DynamicJSONToStruct(data interface{}, target interface{}) error { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, err := WriteJSON(data) + if err != nil { + return err + } + return ReadJSON(b, target) +} + +// ConcatJSON concatenates multiple json objects efficiently +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last-- + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { + return blobs[0] + } + + var opening, closing byte + var idx, a int + buf := bytes.NewBuffer(nil) + + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } + if len(b) > 0 && opening == 0 { // is this an array or an object? 
+ opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + if len(b) < 3 { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + if err := buf.WriteByte(comma); err != nil { + log.Println(err) + } + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + if _, err := buf.Write(b[idx : len(b)-1]); err != nil { + log.Println(err) + } + } else { // last one, strip only the leading bracket + if _, err := buf.Write(b[idx:]); err != nil { + log.Println(err) + } + } + a++ + } + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 { + if err := buf.WriteByte(opening); err != nil { + log.Println(err) + } + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } + } + return buf.Bytes() +} + +// ToDynamicJSON turns an object into a properly JSON typed structure +func ToDynamicJSON(data interface{}) interface{} { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } + var res interface{} + if err := json.Unmarshal(b, &res); err != nil { + log.Println(err) + } + return res +} + +// FromDynamicJSON turns an object into a properly JSON typed structure +func FromDynamicJSON(data, target interface{}) error { + b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } + return json.Unmarshal(b, target) +} + +// NameProvider represents an object capabale of translating from go property names +// to json property names +// This type is thread-safe. 
+type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject interface{}) []string { + n.lock.Lock() + defer n.lock.Unlock() + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + res := make([]string, 0, len(names.jsonNames)) + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.goNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.jsonNames[name] + return nme, ok +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go new file mode 100644 index 00000000..70f4fb36 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -0,0 +1,80 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "path/filepath" + "strings" + "time" +) + +// LoadHTTPTimeout the default timeout for load requests +var LoadHTTPTimeout = 30 * time.Second + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in +func LoadFromFileOrHTTP(path string) ([]byte, error) { + return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +} + +// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in +// timeout arg allows for per request overriding of the request timeout +func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) +} + +// LoadStrategy returns a loader function for a given path or uri +func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(path, "http") { + return remote + } + return func(pth string) ([]byte, error) { + upth, err := pathUnescape(pth) + if err != nil { + return nil, err + } + return local(filepath.FromSlash(upth)) + } +} + +func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { + return func(path string) ([]byte, error) { + client := &http.Client{Timeout: timeout} + req, err := http.NewRequest("GET", path, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + defer func() { + if resp != nil { + if e := resp.Body.Close(); e != nil { + log.Println(e) + } + } + }() + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + } + + return ioutil.ReadAll(resp.Body) + } +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go new file mode 100644 index 00000000..aa7f6a9b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
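
For orientation, a minimal usage sketch of a few of the swag helpers vendored above (convert_types.go, json.go and loading.go). The literal values and the "spec.yaml" file name are invented for illustration; only exported functions that appear in the hunks above are called.

package main

import (
    "fmt"

    "github.com/go-openapi/swag"
)

func main() {
    // Pointer/value helpers: Uint64 wraps a value in a pointer, Uint64Value
    // dereferences safely (a nil pointer yields the zero value).
    n := swag.Uint64(42)
    fmt.Println(swag.Uint64Value(n), swag.Uint64Value(nil)) // 42 0

    // ConcatJSON merges several JSON objects into one document by stripping
    // the inner braces and joining the bodies with commas.
    merged := swag.ConcatJSON(
        []byte(`{"a":1}`),
        []byte(`{"b":2}`),
    )
    fmt.Println(string(merged)) // {"a":1,"b":2}

    // LoadFromFileOrHTTP picks a file or HTTP loader based on the path prefix;
    // "spec.yaml" is a hypothetical local file here.
    if data, err := swag.LoadFromFileOrHTTP("spec.yaml"); err == nil {
        fmt.Println(len(data))
    }
}
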
+ +package swag + +import "unicode" + +type ( + nameLexem interface { + GetUnsafeGoName() string + GetOriginal() string + IsInitialism() bool + } + + initialismNameLexem struct { + original string + matchedInitialism string + } + + casualNameLexem struct { + original string + } +) + +func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { + return &initialismNameLexem{ + original: original, + matchedInitialism: matchedInitialism, + } +} + +func newCasualNameLexem(original string) *casualNameLexem { + return &casualNameLexem{ + original: original, + } +} + +func (l *initialismNameLexem) GetUnsafeGoName() string { + return l.matchedInitialism +} + +func (l *casualNameLexem) GetUnsafeGoName() string { + var first rune + var rest string + for i, orig := range l.original { + if i == 0 { + first = orig + continue + } + if i > 0 { + rest = l.original[i:] + break + } + } + if len(l.original) > 1 { + return string(unicode.ToUpper(first)) + lower(rest) + } + + return l.original +} + +func (l *initialismNameLexem) GetOriginal() string { + return l.original +} + +func (l *casualNameLexem) GetOriginal() string { + return l.original +} + +func (l *initialismNameLexem) IsInitialism() bool { + return true +} + +func (l *casualNameLexem) IsInitialism() bool { + return false +} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go new file mode 100644 index 00000000..821235f8 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/net.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "net" + "strconv" +) + +// SplitHostPort splits a network address into a host and a port. +// The port is -1 when there is no port to be found +func SplitHostPort(addr string) (host string, port int, err error) { + h, p, err := net.SplitHostPort(addr) + if err != nil { + return "", -1, err + } + if p == "" { + return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} + } + + pi, err := strconv.Atoi(p) + if err != nil { + return "", -1, err + } + return h, pi, nil +} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go new file mode 100644 index 00000000..941bd017 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/path.go @@ -0,0 +1,59 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
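
A small sketch of SplitHostPort from net.go above; the address literal is an arbitrary example.

package main

import (
    "fmt"

    "github.com/go-openapi/swag"
)

func main() {
    // SplitHostPort wraps net.SplitHostPort but also parses the port into an
    // int and reports -1 when no port can be found.
    host, port, err := swag.SplitHostPort("localhost:8080")
    if err != nil {
        panic(err)
    }
    fmt.Println(host, port) // localhost 8080
}
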
+ +package swag + +import ( + "os" + "path/filepath" + "runtime" + "strings" +) + +const ( + // GOPATHKey represents the env key for gopath + GOPATHKey = "GOPATH" +) + +// FindInSearchPath finds a package in a provided lists of paths +func FindInSearchPath(searchPath, pkg string) string { + pathsList := filepath.SplitList(searchPath) + for _, path := range pathsList { + if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { + if _, err := os.Stat(evaluatedPath); err == nil { + return evaluatedPath + } + } + } + return "" +} + +// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +func FindInGoSearchPath(pkg string) string { + return FindInSearchPath(FullGoSearchPath(), pkg) +} + +// FullGoSearchPath gets the search paths for finding packages +func FullGoSearchPath() string { + allPaths := os.Getenv(GOPATHKey) + if allPaths == "" { + allPaths = filepath.Join(os.Getenv("HOME"), "go") + } + if allPaths != "" { + allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") + } else { + allPaths = runtime.GOROOT() + } + return allPaths +} diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go new file mode 100644 index 00000000..c2e686d3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.PathUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go new file mode 100644 index 00000000..eb2f2d8b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -0,0 +1,67 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. 
+type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, value interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go new file mode 100644 index 00000000..6607f339 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go new file mode 100644 index 00000000..4bae187d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -0,0 +1,69 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Before go1.9, this may be implemented with a mutex on the map. 
+type indexOfInitialisms struct { + getMutex *sync.Mutex + index map[string]bool +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + getMutex: new(sync.Mutex), + index: make(map[string]bool, 50), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k, v := range initial { + m.index[k] = v + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + m.getMutex.Lock() + defer m.getMutex.Unlock() + _, ok := m.index[key] + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + m.index[key] = true + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byInitialism(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go new file mode 100644 index 00000000..a1825fb7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/split.go @@ -0,0 +1,262 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
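
A brief sketch of the search-path helpers from path.go above; the package path is illustrative and the output depends on the local GOPATH/GOROOT.

package main

import (
    "fmt"

    "github.com/go-openapi/swag"
)

func main() {
    // FullGoSearchPath joins $GOPATH (or $HOME/go when GOPATH is unset) with
    // $GOROOT; FindInGoSearchPath walks those roots looking for src/<pkg> and
    // returns the first existing directory, or "" when nothing is found.
    fmt.Println(swag.FullGoSearchPath())
    fmt.Println(swag.FindInGoSearchPath("github.com/go-openapi/swag"))
}
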
+ +package swag + +import ( + "unicode" +) + +var nameReplaceTable = map[rune]string{ + '@': "At ", + '&': "And ", + '|': "Pipe ", + '$': "Dollar ", + '!': "Bang ", + '-': "", + '_': "", +} + +type ( + splitter struct { + postSplitInitialismCheck bool + initialisms []string + } + + splitterOption func(*splitter) *splitter +) + +// split calls the splitter; splitter provides more control and post options +func split(str string) []string { + lexems := newSplitter().split(str) + result := make([]string, 0, len(lexems)) + + for _, lexem := range lexems { + result = append(result, lexem.GetOriginal()) + } + + return result + +} + +func (s *splitter) split(str string) []nameLexem { + return s.toNameLexems(str) +} + +func newSplitter(options ...splitterOption) *splitter { + splitter := &splitter{ + postSplitInitialismCheck: false, + initialisms: initialisms, + } + + for _, option := range options { + splitter = option(splitter) + } + + return splitter +} + +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) *splitter { + s.postSplitInitialismCheck = true + return s +} + +type ( + initialismMatch struct { + start, end int + body []rune + complete bool + } + initialismMatches []*initialismMatch +) + +func (s *splitter) toNameLexems(name string) []nameLexem { + nameRunes := []rune(name) + matches := s.gatherInitialismMatches(nameRunes) + return s.mapMatchesToNameLexems(nameRunes, matches) +} + +func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { + matches := make(initialismMatches, 0) + + for currentRunePosition, currentRune := range nameRunes { + newMatches := make(initialismMatches, 0, len(matches)) + + // check current initialism matches + for _, match := range matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + newMatches = append(newMatches, match) + continue + } + + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if !s.initialismRuneEqual(currentMatchRune, currentRune) { + continue + } + + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + newMatches = append(newMatches, match) + } + + // check for new initialism matches + for _, initialism := range s.initialisms { + initialismRunes := []rune(initialism) + if s.initialismRuneEqual(initialismRunes[0], currentRune) { + newMatches = append(newMatches, &initialismMatch{ + start: currentRunePosition, + body: initialismRunes, + complete: false, + }) + } + } + + matches = newMatches + } + + return matches +} + +func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { + nameLexems := make([]nameLexem, 0) + + var lastAcceptedMatch *initialismMatch + for _, match := range matches { + if !match.complete { + continue + } + + if firstMatch := lastAcceptedMatch == nil; firstMatch { + nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
+ nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + + continue + } + + if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch { + continue + } + + middle := nameRunes[lastAcceptedMatch.end+1 : match.start] + nameLexems = append(nameLexems, s.breakCasualString(middle)...) + nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + } + + // we have not found any accepted matches + if lastAcceptedMatch == nil { + return s.breakCasualString(nameRunes) + } + + if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + nameLexems = append(nameLexems, s.breakCasualString(rest)...) + } + + return nameLexems +} + +func (s *splitter) initialismRuneEqual(a, b rune) bool { + return a == b +} + +func (s *splitter) breakInitialism(original string) nameLexem { + return newInitialismNameLexem(original, original) +} + +func (s *splitter) breakCasualString(str []rune) []nameLexem { + segments := make([]nameLexem, 0) + currentSegment := "" + + addCasualNameLexem := func(original string) { + segments = append(segments, newCasualNameLexem(original)) + } + + addInitialismNameLexem := func(original, match string) { + segments = append(segments, newInitialismNameLexem(original, match)) + } + + addNameLexem := func(original string) { + if s.postSplitInitialismCheck { + for _, initialism := range s.initialisms { + if upper(initialism) == upper(original) { + addInitialismNameLexem(original, initialism) + return + } + } + } + + addCasualNameLexem(original) + } + + for _, rn := range string(str) { + if replace, found := nameReplaceTable[rn]; found { + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" + } + + if replace != "" { + addNameLexem(replace) + } + + continue + } + + if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" + } + + continue + } + + if unicode.IsUpper(rn) { + if currentSegment != "" { + addNameLexem(currentSegment) + } + currentSegment = "" + } + + currentSegment += string(rn) + } + + if currentSegment != "" { + addNameLexem(currentSegment) + } + + return segments +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go new file mode 100644 index 00000000..9eac16af --- /dev/null +++ b/vendor/github.com/go-openapi/swag/util.go @@ -0,0 +1,385 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "reflect" + "strings" + "unicode" +) + +// commonInitialisms are common acronyms that are kept as whole uppercased words. +var commonInitialisms *indexOfInitialisms + +// initialisms is a slice of sorted initialisms +var initialisms []string + +var isInitialism func(string) bool + +// GoNamePrefixFunc sets an optional rule to prefix go names +// which do not start with a letter. +// +// e.g. 
to help converting "123" into "{prefix}123" +// +// The default is to prefix with "X" +var GoNamePrefixFunc func(string) string + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + var configuredInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +const ( + //collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" +) + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return data + default: + sep = "," + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +// +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return nil + default: + sep = "," + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} + +// Removes leading whitespaces +func trim(str string) string { + return strings.Trim(str, " ") +} + +// Shortcut to strings.ToUpper() +func upper(str string) string { + return strings.ToUpper(trim(str)) +} + +// Shortcut to strings.ToLower() +func lower(str string) string { + return strings.ToLower(trim(str)) +} + +// Camelize an uppercased word +func Camelize(word string) (camelized string) { + for pos, ru := range []rune(word) { + if pos > 0 { + camelized += string(unicode.ToLower(ru)) + } else { + camelized += string(unicode.ToUpper(ru)) + } + } + return +} + +// ToFileName lowercases and 
underscores a go type name +func ToFileName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + + return strings.Join(out, "_") +} + +// ToCommandName lowercases and underscores a go type name +func ToCommandName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + return strings.Join(out, "-") +} + +// ToHumanNameLower represents a code name as a human series of words +func ToHumanNameLower(name string) string { + in := newSplitter(withPostSplitInitialismCheck).split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + if !w.IsInitialism() { + out = append(out, lower(w.GetOriginal())) + } else { + out = append(out, w.GetOriginal()) + } + } + + return strings.Join(out, " ") +} + +// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized +func ToHumanNameTitle(name string) string { + in := newSplitter(withPostSplitInitialismCheck).split(name) + + out := make([]string, 0, len(in)) + for _, w := range in { + original := w.GetOriginal() + if !w.IsInitialism() { + out = append(out, Camelize(original)) + } else { + out = append(out, original) + } + } + return strings.Join(out, " ") +} + +// ToJSONName camelcases a name which can be underscored or pascal cased +func ToJSONName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for i, w := range in { + if i == 0 { + out = append(out, lower(w)) + continue + } + out = append(out, Camelize(w)) + } + return strings.Join(out, "") +} + +// ToVarName camelcases a name which can be underscored or pascal cased +func ToVarName(name string) string { + res := ToGoName(name) + if isInitialism(res) { + return lower(res) + } + if len(res) <= 1 { + return lower(res) + } + return lower(res[:1]) + res[1:] +} + +// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes +func ToGoName(name string) string { + lexems := newSplitter(withPostSplitInitialismCheck).split(name) + + result := "" + for _, lexem := range lexems { + goName := lexem.GetUnsafeGoName() + + // to support old behavior + if lexem.IsInitialism() { + goName = upper(goName) + } + result += goName + } + + if len(result) > 0 { + // Only prefix with X when the first character isn't an ascii letter + first := []rune(result)[0] + if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { + if GoNamePrefixFunc == nil { + return "X" + result + } + result = GoNamePrefixFunc(name) + result + } + first = []rune(result)[0] + if unicode.IsLetter(first) && !unicode.IsUpper(first) { + result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) + } + } + + return result +} + +// ContainsStrings searches a slice of strings for a case-sensitive match +func ContainsStrings(coll []string, item string) bool { + for _, a := range coll { + if a == item { + return true + } + } + return false +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match +func ContainsStringsCI(coll []string, item string) bool { + for _, a := range coll { + if strings.EqualFold(a, item) { + return true + } + } + return false +} + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. 
+func IsZero(data interface{}) bool { + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + // continue with slightly more complex reflection + v := reflect.ValueOf(data) + switch v.Kind() { + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + } + return false +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + //commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() +} + +// CommandLineOptionsGroup represents a group of user-defined command line options +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options interface{} +} diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go new file mode 100644 index 00000000..ec969144 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -0,0 +1,246 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
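
For orientation, a sketch of the name-mangling and collection-format helpers from util.go above; the input strings are arbitrary examples and the commented outputs assume the default initialism table.

package main

import (
    "fmt"

    "github.com/go-openapi/swag"
)

func main() {
    // ToGoName produces an exported, golint-friendly identifier and keeps known
    // initialisms (HTTP, ID, ...) uppercased; ToJSONName camel-cases with a
    // lowercase first word; ToFileName lowercases and joins with underscores.
    fmt.Println(swag.ToGoName("http_server_id"))   // HTTPServerID
    fmt.Println(swag.ToJSONName("http_server_id")) // httpServerId
    fmt.Println(swag.ToFileName("HTTPServerID"))   // http_server_id

    // JoinByFormat / SplitByFormat implement swagger's collectionFormat values
    // (csv, ssv, tsv, pipes, multi).
    fmt.Println(swag.JoinByFormat([]string{"a", "b", "c"}, "pipes")) // [a|b|c]
    fmt.Println(swag.SplitByFormat("a|b|c", "pipes"))                // [a b c]

    // IsZero reports whether a value is its type's zero value.
    fmt.Println(swag.IsZero(""), swag.IsZero(42)) // true false
}
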
+ +package swag + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strconv" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" + yaml "gopkg.in/yaml.v2" +) + +// YAMLMatcher matches yaml +func YAMLMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".yaml" || ext == ".yml" +} + +// YAMLToJSON converts YAML unmarshaled data into json compatible data +func YAMLToJSON(data interface{}) (json.RawMessage, error) { + jm, err := transformData(data) + if err != nil { + return nil, err + } + b, err := WriteJSON(jm) + return json.RawMessage(b), err +} + +// BytesToYAMLDoc converts a byte slice into a YAML document +func BytesToYAMLDoc(data []byte) (interface{}, error) { + var canary map[interface{}]interface{} // validate this is an object and not a different type + if err := yaml.Unmarshal(data, &canary); err != nil { + return nil, err + } + + var document yaml.MapSlice // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { + return nil, err + } + return document, nil +} + +// JSONMapSlice represent a JSON object, with the order of keys maintained +type JSONMapSlice []JSONMapItem + +// MarshalJSON renders a JSONMapSlice as JSON +func (s JSONMapSlice) MarshalJSON() ([]byte, error) { + w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} + s.MarshalEasyJSON(w) + return w.BuildBytes() +} + +// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON +func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { + w.RawByte('{') + + ln := len(s) + last := ln - 1 + for i := 0; i < ln; i++ { + s[i].MarshalEasyJSON(w) + if i != last { // last item + w.RawByte(',') + } + } + + w.RawByte('}') +} + +// UnmarshalJSON makes a JSONMapSlice from JSON +func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON +func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { + if in.IsNull() { + in.Skip() + return + } + + var result JSONMapSlice + in.Delim('{') + for !in.IsDelim('}') { + var mi JSONMapItem + mi.UnmarshalEasyJSON(in) + result = append(result, mi) + } + *s = result +} + +// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice +type JSONMapItem struct { + Key string + Value interface{} +} + +// MarshalJSON renders a JSONMapItem as JSON +func (s JSONMapItem) MarshalJSON() ([]byte, error) { + w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} + s.MarshalEasyJSON(w) + return w.BuildBytes() +} + +// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON +func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { + w.String(s.Key) + w.RawByte(':') + w.Raw(WriteJSON(s.Value)) +} + +// UnmarshalJSON makes a JSONMapItem from JSON +func (s *JSONMapItem) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON +func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { + key := in.UnsafeString() + in.WantColon() + value := in.Interface() + in.WantComma() + s.Key = key + s.Value = value +} + +func transformData(input interface{}) (out interface{}, err error) { + format := func(t interface{}) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 10), nil + case uint8: + return 
strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", fmt.Errorf("unexpected map key type, got: %T", k) + } + } + + switch in := input.(type) { + case yaml.MapSlice: + + o := make(JSONMapSlice, len(in)) + for i, mi := range in { + var nmi JSONMapItem + if nmi.Key, err = format(mi.Key); err != nil { + return nil, err + } + + v, ert := transformData(mi.Value) + if ert != nil { + return nil, ert + } + nmi.Value = v + o[i] = nmi + } + return o, nil + case map[interface{}]interface{}: + o := make(JSONMapSlice, 0, len(in)) + for ke, va := range in { + var nmi JSONMapItem + if nmi.Key, err = format(ke); err != nil { + return nil, err + } + + v, ert := transformData(va) + if ert != nil { + return nil, ert + } + nmi.Value = v + o = append(o, nmi) + } + return o, nil + case []interface{}: + len1 := len(in) + o := make([]interface{}, len1) + for i := 0; i < len1; i++ { + o[i], err = transformData(in[i]) + if err != nil { + return nil, err + } + } + return o, nil + } + return input, nil +} + +// YAMLDoc loads a yaml document from either http or a file and converts it to json +func YAMLDoc(path string) (json.RawMessage, error) { + yamlDoc, err := YAMLData(path) + if err != nil { + return nil, err + } + + data, err := YAMLToJSON(yamlDoc) + if err != nil { + return nil, err + } + + return data, nil +} + +// YAMLData loads a yaml document from either http or a file +func YAMLData(path string) (interface{}, error) { + data, err := LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + + return BytesToYAMLDoc(data) +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index 686bd2a0..341c6f57 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -527,6 +527,7 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. 
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) newb, err := encodeExtension(extension, value) if err != nil { return err diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go index 53ebd8cc..6f1ae120 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -154,6 +154,10 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) return EncodeExtensionMap(m.extensionsWrite(), data) } +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { o := 0 for _, e := range m { @@ -169,6 +173,23 @@ func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { return o, nil } +func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { e := m[id] if err := e.Encode(); err != nil { diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 00000000..32017f8f --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 00000000..2133562b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,616 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. 
+// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + s := newState(opts) + s.compareAny(&pathStep{t, vx, vy}) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + eq := Equal(x, y, Options(opts), Reporter(r)) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. 
+ s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. 
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. 
+ vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. 
If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. 
+func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 00000000..abc3a1c3 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 00000000..59d4ee91 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 00000000..fe98dcc6 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 00000000..597b6ae5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 00000000..3d2e4266 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. 
+func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. 
+ // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + // + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + for { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. 
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000..a9e7fc0b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 00000000..01aed0a1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 00000000..c0b667f5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 00000000..ace1dbe8 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. 
+func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000..0a01c479 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000..da134ae2 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 00000000..938f646f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,104 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. 
+ sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 00000000..d13a12cc --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,45 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import "reflect" + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 00000000..79344816 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,524 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
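// Editor's annotation (not part of the vendored options.go or of this patch):
// a minimal sketch of grouping several options into one reusable value, assuming
// the public go-cmp and cmpopts packages; got and want are hypothetical values:
//
//	opts := cmp.Options{
//		cmpopts.EquateEmpty(),
//		cmpopts.EquateApprox(0, 1e-9),
//	}
//	if diff := cmp.Diff(want, got, opts); diff != "" {
//		fmt.Printf("mismatch (-want +got):\n%s", diff)
//	}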
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
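// Editor's annotation (not part of the vendored options.go or of this patch):
// a minimal FilterValues sketch, assuming the public go-cmp API and "math" are
// imported; it scopes a Comparer to pairs of NaN values only:
//
//	opt := cmp.FilterValues(
//		func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) },
//		cmp.Comparer(func(_, _ float64) bool { return true }),
//	)
//	cmp.Equal(math.NaN(), math.NaN())      // false: NaN != NaN by default
//	cmp.Equal(math.NaN(), math.NaN(), opt) // true: the filter admits the Comparer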
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. 
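// Editor's annotation (not part of the vendored options.go or of this patch):
// a minimal Transformer sketch, assuming the public go-cmp API and "time" are
// imported; it compares durations only at one-second granularity:
//
//	trunc := cmp.Transformer("TruncateSecond", func(d time.Duration) time.Duration {
//		return d.Truncate(time.Second)
//	})
//	cmp.Equal(1500*time.Millisecond, 1200*time.Millisecond, trunc) // true: both truncate to 1s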
+// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
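// Editor's annotation (not part of the vendored options.go or of this patch):
// a minimal Comparer sketch, assuming the public go-cmp API and "math" are
// imported; it treats floats as equal when they differ by less than 1e-9:
//
//	approx := cmp.Comparer(func(x, y float64) bool { return math.Abs(x-y) < 1e-9 })
//	cmp.Equal(0.1+0.2, 0.3)         // false: floating-point rounding
//	cmp.Equal(0.1+0.2, 0.3, approx) // true
//
// Note that such tolerance-based comparers are symmetric and deterministic, as
// required below, but not transitive.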
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. 
+func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 00000000..96fffd29 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,308 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
+// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000..6ddf2999 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000..05efb992 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. 
+ TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. 
+ if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueX) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. 
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 00000000..5521c604 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,279 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. + if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. 
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. + if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying 
value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.AvoidStringer = true + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 00000000..8cb3265e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. 
+ return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = false + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + lastLineIdx = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. 
+ case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = (*prev).Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000..80605d0e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,382 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) 
+ if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) 
+ } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name = name + "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 00000000..83031a7f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. + TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += 
child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go index e674d0f3..92d2cc4a 100644 --- a/vendor/github.com/json-iterator/go/adapter.go +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -16,7 +16,7 @@ func Unmarshal(data []byte, v interface{}) error { return ConfigDefault.Unmarshal(data, v) } -// UnmarshalFromString convenient method to read from string instead of []byte +// UnmarshalFromString is a convenient method to read from string instead of []byte func UnmarshalFromString(str string, v interface{}) error { return ConfigDefault.UnmarshalFromString(str, v) } diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 00000000..e05c42ff --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 00000000..d778b5a1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go index f58beb91..e91eefb1 100644 --- a/vendor/github.com/json-iterator/go/iter_skip.go +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -37,17 +37,24 @@ func (iter *Iterator) SkipAndReturnBytes() []byte { return iter.stopCapture() } -type captureBuffer struct { - startedAt int - captured []byte +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. 
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() } -func (iter *Iterator) startCapture(captureStartedAt int) { +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { if iter.captured != nil { panic("already in capture mode") } iter.captureStartedAt = captureStartedAt - iter.captured = make([]byte, 0, 32) + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) } func (iter *Iterator) stopCapture() []byte { @@ -58,13 +65,7 @@ func (iter *Iterator) stopCapture() []byte { remaining := iter.buf[iter.captureStartedAt:iter.head] iter.captureStartedAt = -1 iter.captured = nil - if len(captured) == 0 { - copied := make([]byte, len(remaining)) - copy(copied, remaining) - return copied - } - captured = append(captured, remaining...) - return captured + return append(captured, remaining...) } // Skip skips a json object and positions to relatively the next json object diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go index 9042eb0c..f88722d1 100644 --- a/vendor/github.com/json-iterator/go/reflect_native.go +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -432,17 +432,19 @@ func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { } func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { + if codec.sliceType.UnsafeIsNil(ptr) { stream.WriteNil() return } + src := *((*[]byte)(ptr)) encoding := base64.StdEncoding stream.writeByte('"') - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } stream.writeByte('"') } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 355d2d11..932641ac 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -530,8 +530,8 @@ func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *It } } if fieldDecoder == nil { - msg := "found unknown field: " + field if decoder.disallowUnknownFields { + msg := "found unknown field: " + field iter.ReportError("ReadObject", msg) } c := iter.nextToken() diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go index f318d2c5..826aa594 100644 --- a/vendor/github.com/json-iterator/go/stream_float.go +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -1,6 +1,7 @@ package jsoniter import ( + "fmt" "math" "strconv" ) @@ -13,6 +14,10 @@ func init() { // WriteFloat32 write float32 to stream func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } abs := math.Abs(float64(val)) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
@@ -26,6 +31,10 @@ func (stream *Stream) WriteFloat32(val float32) { // WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } if val < 0 { stream.writeByte('-') val = -val @@ -54,6 +63,10 @@ func (stream *Stream) WriteFloat32Lossy(val float32) { // WriteFloat64 write float64 to stream func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } abs := math.Abs(val) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. @@ -67,6 +80,10 @@ func (stream *Stream) WriteFloat64(val float64) { // WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } if val < 0 { stream.writeByte('-') val = -val diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 00000000..fbff658f --- /dev/null +++ b/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 00000000..07fb4bc1 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,270 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. 
+var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size < config.PooledSize { + return make([]byte, 0, size) + } + + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) >= s { + return + } + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendBytes appends a string to buffer. +func (b *Buffer) AppendString(data string) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. +func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + var n int + for _, buf := range b.bufs { + if err == nil { + n, err = w.Write(buf) + written += n + } + putBuf(buf) + } + + if err == nil { + n, err = w.Write(b.Buf) + written += n + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. 
+func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { + if len(b.bufs) == 0 { + ret := b.Buf + b.toPool = nil + b.Buf = nil + return ret + } + + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enought, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? + if r.offset+x == len(buf) { + // On to the next buffer. + r.offset = 0 + r.bufs = r.bufs[1:] + + // We can release this buffer. + putBuf(buf) + } else { + r.offset += x + } + + if n == len(p) { + break + } + } + // No buffers left or nothing read? + if len(r.bufs) == 0 { + err = io.EOF + } + return +} + +func (r *readCloser) Close() error { + // Release all remaining buffers. + for _, buf := range r.bufs { + putBuf(buf) + } + // In case Close gets called multiple times. + r.bufs = nil + + return nil +} + +// ReadCloser creates an io.ReadCloser with all the contents of the buffer. +func (b *Buffer) ReadCloser() io.ReadCloser { + ret := &readCloser{0, append(b.bufs, b.Buf)} + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go new file mode 100644 index 00000000..ff7b27c5 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -0,0 +1,24 @@ +// This file will only be included to the build if neither +// easyjson_nounsafe nor appengine build tag is set. See README notes +// for more details. + +//+build !easyjson_nounsafe +//+build !appengine + +package jlexer + +import ( + "reflect" + "unsafe" +) + +// bytesToStr creates a string pointing at the slice to avoid copying. +// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go new file mode 100644 index 00000000..864d1be6 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go @@ -0,0 +1,13 @@ +// This file is included to the build if any of the buildtags below +// are defined. Refer to README notes for more details. + +//+build easyjson_nounsafe appengine + +package jlexer + +// bytesToStr creates a string normally from []byte +// +// Note that this method is roughly 1.5x slower than using the 'unsafe' method. 
+func bytesToStr(data []byte) string { + return string(data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 00000000..e90ec40d --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. +type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 00000000..ddd376b8 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,1182 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// tokenKind determines type of a token. +type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. +type token struct { + kind tokenKind // Type of a token. + + boolValue bool // Value if a boolean literal token. + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. + wantSep byte // A comma or a colon character, which need to occur before a token. + + UseMultipleErrors bool // If we want to use multiple errors. + fatalError error // Fatal error occurred during lexing. It is usually a syntax error. + multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. +} + +// FetchToken scans the input for the next token. +func (r *Lexer) FetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. 
+ for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.fatalError = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. +func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. +func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// fetchNumber scans a number literal token. +func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' && !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. 
+func findStringLen(data []byte) (isValid, hasEscapes bool, length int) { + delta := 0 + + for i := 0; i < len(data); i++ { + switch data[i] { + case '\\': + i++ + delta++ + if i < len(data) && data[i] == 'u' { + delta++ + } + case '"': + return true, (delta > 0), (i - delta) + } + } + + return false, false, len(data) +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var val rune + for i := 2; i < len(s) && i < 6; i++ { + var v byte + c := s[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return -1 + } + + val <<= 4 + val |= rune(v) + } + return val +} + +// processEscape processes a single escape sequence and returns number of bytes processed. +func (r *Lexer) processEscape(data []byte) (int, error) { + if len(data) < 2 { + return 0, fmt.Errorf("syntax error at %v", string(data)) + } + + c := data[1] + switch c { + case '"', '/', '\\': + r.token.byteValue = append(r.token.byteValue, c) + return 2, nil + case 'b': + r.token.byteValue = append(r.token.byteValue, '\b') + return 2, nil + case 'f': + r.token.byteValue = append(r.token.byteValue, '\f') + return 2, nil + case 'n': + r.token.byteValue = append(r.token.byteValue, '\n') + return 2, nil + case 'r': + r.token.byteValue = append(r.token.byteValue, '\r') + return 2, nil + case 't': + r.token.byteValue = append(r.token.byteValue, '\t') + return 2, nil + case 'u': + rr := getu4(data) + if rr < 0 { + return 0, errors.New("syntax error") + } + + read := 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(data[read:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + read += 6 + rr = dec + } else { + rr = unicode.ReplacementChar + } + } + var d [4]byte + s := utf8.EncodeRune(d[:], rr) + r.token.byteValue = append(r.token.byteValue, d[:s]...) + return read, nil + } + + return 0, errors.New("syntax error") +} + +// fetchString scans a string literal token. +func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + isValid, hasEscapes, length := findStringLen(data) + if !isValid { + r.pos += length + r.errParse("unterminated string literal") + return + } + if !hasEscapes { + r.token.byteValue = data[:length] + r.pos += length + 1 + return + } + + r.token.byteValue = make([]byte, 0, length) + p := 0 + for i := 0; i < len(data); { + switch data[i] { + case '"': + r.pos += i + 1 + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + i++ + return + + case '\\': + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + off, err := r.processEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return + } + i += off + p = i + + default: + i++ + } + } + r.errParse("unterminated string literal") +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.fatalError != nil { + return + } + + r.FetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. 
+func (r *Lexer) Ok() bool { + return r.fatalError == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.fatalError == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.fatalError != nil { + return + } + if r.UseMultipleErrors { + r.pos = r.start + r.consume() + r.SkipRecursive() + switch expected { + case "[": + r.token.delimValue = ']' + r.token.kind = tokenDelim + case "{": + r.token.delimValue = '}' + r.token.kind = tokenDelim + } + r.addNonfatalError(&LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + }) + return + } + + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } +} + +func (r *Lexer) GetPos() int { + return r.pos +} + +// Delim consumes a token and verifies that it is the given delimiter. +func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() || r.token.delimValue != c { + r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. + r.errInvalidToken(string([]byte{c})) + } else { + r.consume() + } +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. +func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. +func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. 
+func (r *Lexer) SkipRecursive() { + r.scanToken() + var start, end byte + + switch r.token.delimValue { + case '{': + start, end = '{', '}' + case '[': + start, end = '[', ']' + default: + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + return + } + case c == '\\' && inQuotes: + wasEscape = !wasEscape + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "EOF reached while skipping array/object or token", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// IsStart returns whether the lexer is positioned at the start +// of an input string. +func (r *Lexer) IsStart() bool { + return r.pos == 0 +} + +// Consumed reads all remaining bytes from the input, publishing an error if +// there is anything but whitespace remaining. +func (r *Lexer) Consumed() { + if r.pos > len(r.Data) || !r.Ok() { + return + } + + for _, c := range r.Data[r.pos:] { + if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + r.AddError(&LexerError{ + Reason: "invalid character '" + string(c) + "' after top-level value", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + }) + return + } + + r.pos++ + r.start++ + } +} + +func (r *Lexer) unsafeString() (string, []byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "", nil + } + bytes := r.token.byteValue + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret, bytes +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + ret, _ := r.unsafeString() + return ret +} + +// UnsafeBytes returns the byte slice if the token is a string literal. +func (r *Lexer) UnsafeBytes() []byte { + _, ret := r.unsafeString() + return ret +} + +// String reads a string literal. +func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + ret := string(r.token.byteValue) + r.consume() + return ret +} + +// Bytes reads a string literal and base64 decodes it into a byte slice. +func (r *Lexer) Bytes() []byte { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return nil + } + ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) + n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + if err != nil { + r.fatalError = &LexerError{ + Reason: err.Error(), + } + return nil + } + + r.consume() + return ret[:n] +} + +// Bool reads a true or false boolean keyword. 
+func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint32(n) +} + 
+func (r *Lexer) Uint64Str() uint64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) UintptrStr() uintptr { + return uintptr(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return float32(n) +} + +func (r *Lexer) Float32Str() float32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Float64Str() float64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) Error() error { + return r.fatalError +} + +func (r *Lexer) AddError(e error) { + if r.fatalError == nil { + r.fatalError = e + } +} + +func (r *Lexer) AddNonFatalError(e error) { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + Reason: e.Error(), + }) +} + +func (r *Lexer) addNonfatalError(err *LexerError) { + if r.UseMultipleErrors { + // We don't want to add errors with the same offset. + if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { + return + } + r.multipleErrors = append(r.multipleErrors, err) + return + } + r.fatalError = err +} + +func (r *Lexer) GetNonFatalErrors() []*LexerError { + return r.multipleErrors +} + +// JsonNumber fetches and json.Number from 'encoding/json' package. 
+// Both int, float or string, contains them are valid values +func (r *Lexer) JsonNumber() json.Number { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() { + r.errInvalidToken("json.Number") + return json.Number("") + } + + switch r.token.kind { + case tokenString: + return json.Number(r.String()) + case tokenNumber: + return json.Number(r.Raw()) + case tokenNull: + r.Null() + return json.Number("") + default: + r.errSyntax() + return json.Number("") + } +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. +func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + ret := []interface{}{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. +func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. +func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 00000000..b9ed7cca --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,390 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but +// Flags field in Writer is used to set and pass them around. +type Flags int + +const ( + NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. + NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. +) + +// Writer is a JSON writer. +type Writer struct { + Flags Flags + + Error error + Buffer buffer.Buffer + NoEscapeHTML bool +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice +// as argument that it will try to reuse. +func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(reuse...), nil +} + +// ReadCloser returns an io.ReadCloser that can be used to read the data. +// ReadCloser also resets the buffer. +func (w *Writer) ReadCloser() (io.ReadCloser, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.ReadCloser(), nil +} + +// RawByte appends raw binary data to the buffer. 
+func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. +func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +// RawText encloses raw binary data in quotes and appends in to the buffer. +// Useful for calling with results of MarshalText-like functions. +func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + w.base64(data) + w.Buffer.AppendByte('"') +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') 
+} + +func (w *Writer) UintptrStr(n uintptr) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float32Str(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Float64Str(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func isNotEscapedSingleChar(c byte, escapeHTML bool) bool { + // Note: might make sense to use a table if there are more chars to escape. With 4 chars + // it benchmarks the same. + if escapeHTML { + return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } else { + return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } +} + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. 
+ + p := 0 // last non-escape symbol + + for i := 0; i < len(s); { + c := s[i] + + if isNotEscapedSingleChar(c, !w.NoEscapeHTML) { + // single-width character, no escaping is required + i++ + continue + } else if c < utf8.RuneSelf { + // single-with character, need to escape + w.Buffer.AppendString(s[p:i]) + switch c { + case '\t': + w.Buffer.AppendString(`\t`) + case '\r': + w.Buffer.AppendString(`\r`) + case '\n': + w.Buffer.AppendString(`\n`) + case '\\': + w.Buffer.AppendString(`\\`) + case '"': + w.Buffer.AppendString(`\"`) + default: + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} + +const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" +const padChar = '=' + +func (w *Writer) base64(in []byte) { + + if len(in) == 0 { + return + } + + w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) + + si := 0 + n := (len(in) / 3) * 3 + + for si < n { + // Convert 3x 8bit source bytes into 4 bytes + val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) + + si += 3 + } + + remain := len(in) - si + if remain == 0 { + return + } + + // Add the remaining small block + val := uint(in[si+0]) << 16 + if remain == 2 { + val |= uint(in[si+1]) << 8 + } + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) + + switch remain { + case 2: + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) + case 1: + w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) + } +} diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 6d489112..3acfb558 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -4,6 +4,7 @@ import ( "reflect" "runtime" "strings" + "sync" "unsafe" ) @@ -15,10 +16,17 @@ func typelinks1() [][]unsafe.Pointer //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) -var types = map[string]reflect.Type{} -var packages = map[string]map[string]reflect.Type{} +// initOnce guards initialization of types and packages +var initOnce sync.Once + +var types map[string]reflect.Type +var packages map[string]map[string]reflect.Type + +// discoverTypes initializes types and packages +func discoverTypes() { + types = make(map[string]reflect.Type) + packages = make(map[string]map[string]reflect.Type) -func init() { ver := runtime.Version() if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { loadGo15Types() @@ -90,11 +98,13 @@ type emptyInterface struct { // TypeByName return the type by its name, just like Class.forName in java func TypeByName(typeName string) Type { + initOnce.Do(discoverTypes) return Type2(types[typeName]) } // TypeByPackageName return the type by its package and name 
func TypeByPackageName(pkgPath string, name string) Type { + initOnce.Do(discoverTypes) pkgTypes := packages[pkgPath] if pkgTypes == nil { return nil diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml deleted file mode 100644 index 3deb4a12..00000000 --- a/vendor/github.com/pborman/uuid/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - "1.9" - - "1.10" - - "1.11" - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f..00000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS deleted file mode 100644 index b382a04e..00000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTORS +++ /dev/null @@ -1 +0,0 @@ -Paul Borman diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md deleted file mode 100644 index 810ad40d..00000000 --- a/vendor/github.com/pborman/uuid/README.md +++ /dev/null @@ -1,15 +0,0 @@ -This project was automatically exported from code.google.com/p/go-uuid - -# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. - -This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). - -###### Install -`go get github.com/pborman/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) - -Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go deleted file mode 100644 index 50a0f2d0..00000000 --- a/vendor/github.com/pborman/uuid/dce.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. 
-func NewDCESecurity(domain Domain, id uint32) UUID { - uuid := NewUUID() - if uuid != nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCEPerson(Person, uint32(os.Getuid())) -func NewDCEPerson() UUID { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCEGroup(Group, uint32(os.Getgid())) -func NewDCEGroup() UUID { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID or false. -func (uuid UUID) Domain() (Domain, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return Domain(uuid[9]), true -} - -// Id returns the id for a Version 2 UUID or false. -func (uuid UUID) Id() (uint32, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return binary.BigEndian.Uint32(uuid[0:4]), true -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go deleted file mode 100644 index 727d7616..00000000 --- a/vendor/github.com/pborman/uuid/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// This package is a partial wrapper around the github.com/google/uuid package. -// This package represents a UUID as []byte while github.com/google/uuid -// represents a UUID as [16]byte. -package uuid diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod deleted file mode 100644 index 099fc7de..00000000 --- a/vendor/github.com/pborman/uuid/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pborman/uuid - -require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum deleted file mode 100644 index db2574a9..00000000 --- a/vendor/github.com/pborman/uuid/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go deleted file mode 100644 index a0420c1e..00000000 --- a/vendor/github.com/pborman/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known Name Space IDs and UUIDs -var ( - NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") - NIL = Parse("00000000-0000-0000-0000-000000000000") -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space) - h.Write([]byte(data)) - s := h.Sum(nil) - uuid := make([]byte, 16) - copy(uuid, s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go deleted file mode 100644 index 35b89352..00000000 --- a/vendor/github.com/pborman/uuid/marshal.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "errors" - "fmt" - - guuid "github.com/google/uuid" -) - -// MarshalText implements encoding.TextMarshaler. -func (u UUID) MarshalText() ([]byte, error) { - if len(u) != 16 { - return nil, nil - } - var js [36]byte - encodeHex(js[:], u) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *UUID) UnmarshalText(data []byte) error { - if len(data) == 0 { - return nil - } - id := Parse(string(data)) - if id == nil { - return errors.New("invalid UUID") - } - *u = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u UUID) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *UUID) UnmarshalBinary(data []byte) error { - if len(data) == 0 { - return nil - } - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - var id [16]byte - copy(id[:], data) - *u = id[:] - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (u Array) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], u[:]) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *Array) UnmarshalText(data []byte) error { - id, err := guuid.ParseBytes(data) - if err != nil { - return err - } - *u = Array(id) - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u Array) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
-func (u *Array) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(u[:], data) - return nil -} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go deleted file mode 100644 index e524e010..00000000 --- a/vendor/github.com/pborman/uuid/node.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - return guuid.NodeInterface() -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - return guuid.SetNodeInterface(name) -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - return guuid.NodeID() -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - return guuid.SetNodeID(id) -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - if len(uuid) != 16 { - return nil - } - node := make([]byte, 6) - copy(node, uuid[10:]) - return node -} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go deleted file mode 100644 index 929c3847..00000000 --- a/vendor/github.com/pborman/uuid/sql.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "errors" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. 
-func (uuid *UUID) Scan(src interface{}) error { - switch src.(type) { - case string: - // if an empty UUID comes from a table, we return a null UUID - if src.(string) == "" { - return nil - } - - // see uuid.Parse for required string format - parsed := Parse(src.(string)) - - if parsed == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = parsed - case []byte: - b := src.([]byte) - - // if an empty UUID comes from a table, we return a null UUID - if len(b) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(b) == 16 { - parsed := make([]byte, 16) - copy(parsed, b) - *uuid = UUID(parsed) - } else { - u := Parse(string(b)) - - if u == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = u - } - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go deleted file mode 100644 index 7286824d..00000000 --- a/vendor/github.com/pborman/uuid/time.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - - guuid "github.com/google/uuid" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time = guuid.Time - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { return guuid.GetTime() } - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence a new random -// clock sequence is generated the first time a clock sequence is requested by -// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated -// for -func ClockSequence() int { return guuid.ClockSequence() } - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. It returns false if uuid is not valid. The time is only well defined -// for version 1 and 2 UUIDs. -func (uuid UUID) Time() (Time, bool) { - if len(uuid) != 16 { - return 0, false - } - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time), true -} - -// ClockSequence returns the clock sequence encoded in uuid. It returns false -// if uuid is not valid. The clock sequence is only well defined for version 1 -// and 2 UUIDs. 
-func (uuid UUID) ClockSequence() (int, bool) { - if len(uuid) != 16 { - return 0, false - } - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true -} diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go deleted file mode 100644 index 255b5e24..00000000 --- a/vendor/github.com/pborman/uuid/util.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts the the first two hex bytes of x into a byte. -func xtob(x string) (byte, bool) { - b1 := xvalues[x[0]] - b2 := xvalues[x[1]] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go deleted file mode 100644 index 33700042..00000000 --- a/vendor/github.com/pborman/uuid/uuid.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "io" - - guuid "github.com/google/uuid" -) - -// Array is a pass-by-value UUID that can be used as an effecient key in a map. -type Array [16]byte - -// UUID converts uuid into a slice. -func (uuid Array) UUID() UUID { - return uuid[:] -} - -// String returns the string representation of uuid, -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (uuid Array) String() string { - return guuid.UUID(uuid).String() -} - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID []byte - -// A Version represents a UUIDs version. -type Version = guuid.Version - -// A Variant represents a UUIDs variant. -type Variant = guuid.Variant - -// Constants returned by Variant. -const ( - Invalid = guuid.Invalid // Invalid UUID - RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 - Reserved = guuid.Reserved // Reserved, NCS backward compatibility. 
- Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future = guuid.Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// New returns a new random (version 4) UUID as a string. It is a convenience -// function for NewRandom().String(). -func New() string { - return NewRandom().String() -} - -// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for -// the formats parsed. -func Parse(s string) UUID { - gu, err := guuid.Parse(s) - if err == nil { - return gu[:] - } - return nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - gu, err := guuid.ParseBytes(b) - if err == nil { - return gu[:], nil - } - return nil, err -} - -// Equal returns true if uuid1 and uuid2 are equal. -func Equal(uuid1, uuid2 UUID) bool { - return bytes.Equal(uuid1, uuid2) -} - -// Array returns an array representation of uuid that can be used as a map key. -// Array panics if uuid is not valid. -func (uuid UUID) Array() Array { - if len(uuid) != 16 { - panic("invalid uuid") - } - var a Array - copy(a[:], uuid) - return a -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - if len(uuid) != 16 { - return "" - } - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - if len(uuid) != 16 { - return "" - } - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. It returns Invalid if -// uuid is invalid. -func (uuid UUID) Variant() Variant { - if len(uuid) != 16 { - return Invalid - } - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. It returns false if uuid is not -// valid. -func (uuid UUID) Version() (Version, bool) { - if len(uuid) != 16 { - return 0, false - } - return Version(uuid[6] >> 4), true -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - guuid.SetRand(r) -} diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go deleted file mode 100644 index 7af948da..00000000 --- a/vendor/github.com/pborman/uuid/version1.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. 
If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil. -func NewUUID() UUID { - gu, err := guuid.NewUUID() - if err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go deleted file mode 100644 index 767dd0c3..00000000 --- a/vendor/github.com/pborman/uuid/version4.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import guuid "github.com/google/uuid" - -// NewRandom returns a Random (Version 4) UUID or panics. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() UUID { - if gu, err := guuid.NewRandom(); err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml index f8a63b30..00d04cb9 100644 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -3,8 +3,9 @@ sudo: false language: go go: - - 1.7.3 - - 1.8.1 + - 1.9.x + - 1.10.x + - 1.11.x - tip matrix: @@ -12,7 +13,7 @@ matrix: - go: tip install: - - go get github.com/golang/lint/golint + - go get golang.org/x/lint/golint - export PATH=$GOPATH/bin:$PATH - go install ./... diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index b052414d..7eacc5bd 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -86,8 +86,8 @@ fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) ``` -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. +There are helper functions available to get the value stored in a Flag if you have a FlagSet but find +it difficult to keep up with all of the pointers in your code. If you have a pflag.FlagSet with a flag called 'flagname' of type int you can use GetInt() to get the int value. But notice that 'flagname' must exist and it must be an int. GetString("flagname") will fail. 
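For context only (this note and sketch are not part of the patch): the README hunk above describes pflag's typed getter helpers on a FlagSet. A minimal sketch of that behavior, assuming the standard spf13/pflag API, could look like this; the flag name and values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Define an int flag on a local FlagSet, as in the README excerpt.
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.Int("flagname", 1234, "an int flag")

	if err := fs.Parse([]string{"--flagname=42"}); err != nil {
		panic(err)
	}

	// GetInt works because "flagname" exists and is an int.
	n, err := fs.GetInt("flagname")
	fmt.Println(n, err) // 42 <nil>

	// GetString on the same flag fails: the flag exists but has the wrong type.
	_, err = fs.GetString("flagname")
	fmt.Println(err)
}
```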
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go index 5af02f1a..3731370d 100644 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -71,6 +71,44 @@ func (s *boolSliceValue) String() string { return "[" + out + "]" } +func (s *boolSliceValue) fromString(val string) (bool, error) { + return strconv.ParseBool(val) +} + +func (s *boolSliceValue) toString(val bool) string { + return strconv.FormatBool(val) +} + +func (s *boolSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *boolSliceValue) Replace(val []string) error { + out := make([]bool, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *boolSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func boolSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index aa126e44..a0b2679f 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -46,7 +46,7 @@ func (f *FlagSet) GetCount(name string) (int, error) { // CountVar defines a count flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) CountVar(p *int, name string, usage string) { f.CountVarP(p, name, "", usage) } @@ -69,7 +69,7 @@ func CountVarP(p *int, name, shorthand string, usage string) { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. 
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) Count(name string, usage string) *int { p := new(int) f.CountVarP(p, name, "", usage) diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go index 52c6b6dc..badadda5 100644 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ b/vendor/github.com/spf13/pflag/duration_slice.go @@ -51,6 +51,44 @@ func (s *durationSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *durationSliceValue) fromString(val string) (time.Duration, error) { + return time.ParseDuration(val) +} + +func (s *durationSliceValue) toString(val time.Duration) string { + return fmt.Sprintf("%s", val) +} + +func (s *durationSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *durationSliceValue) Replace(val []string) error { + out := make([]time.Duration, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *durationSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func durationSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 9beeda8e..24a5036e 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -57,9 +57,9 @@ that give one-letter shorthands for flags. You can use these by appending var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { - flag.BoolVarP("boolname", "b", true, "help message") + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") + flag.VarP(&flagval, "varname", "v", "help message") Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. @@ -190,6 +190,18 @@ type Value interface { Type() string } +// SliceValue is a secondary interface to all flags which hold a list +// of values. This allows full control over the value of list flags, +// and avoids complicated marshalling and unmarshalling to csv. +type SliceValue interface { + // Append adds the specified value to the end of the flag value list. + Append(string) error + // Replace will fully overwrite any data currently in the flag value list. + Replace([]string) error + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + // sortFlags returns the flags as a slice in lexicographical sorted order. 
func sortFlags(flags map[NormalizedName]*Flag) []*Flag { list := make(sort.StringSlice, len(flags)) diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go new file mode 100644 index 00000000..caa35274 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. +// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go new file mode 100644 index 00000000..85bf3073 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. +// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. 
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. +func Float64Slice(name string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, "", value, usage) +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod new file mode 100644 index 00000000..b2287eec --- /dev/null +++ b/vendor/github.com/spf13/pflag/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/pflag + +go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go new file mode 100644 index 00000000..ff128ff0 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. +// The argument p points to a []int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. 
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go new file mode 100644 index 00000000..25464638 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go index 1e7c9edd..e71c39d9 100644 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -51,6 +51,36 @@ func (s *intSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *intSliceValue) Append(val string) error { + i, err := strconv.Atoi(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *intSliceValue) Replace(val []string) error { + out := make([]int, len(val)) + for i, d := range val { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *intSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = strconv.Itoa(d) + } + return out +} + func intSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go index 7dd196fe..775faae4 100644 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -72,9 +72,47 @@ func (s *ipSliceValue) String() string { return "[" + out + "]" } +func (s *ipSliceValue) fromString(val string) (net.IP, error) { + return net.ParseIP(strings.TrimSpace(val)), nil +} + +func (s *ipSliceValue) toString(val net.IP) string { + return val.String() +} + +func (s *ipSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *ipSliceValue) Replace(val []string) error { + out := make([]net.IP, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *ipSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func ipSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IP{}, nil } diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index fa7bc601..4894af81 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -23,6 +23,32 @@ func (s *stringArrayValue) Set(val string) error { return nil } +func (s *stringArrayValue) Append(val string) error { + *s.value = append(*s.value, val) + return 
nil +} + +func (s *stringArrayValue) Replace(val []string) error { + out := make([]string, len(val)) + for i, d := range val { + var err error + out[i] = d + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *stringArrayValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = d + } + return out +} + func (s *stringArrayValue) Type() string { return "stringArray" } diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 0cd3ccc0..3cb2e69d 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -62,6 +62,20 @@ func (s *stringSliceValue) String() string { return "[" + str + "]" } +func (s *stringSliceValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringSliceValue) Replace(val []string) error { + *s.value = val + return nil +} + +func (s *stringSliceValue) GetSlice() []string { + return *s.value +} + func stringSliceConv(sval string) (interface{}, error) { sval = sval[1 : len(sval)-1] // An empty string would cause a slice with one (empty) string @@ -84,7 +98,7 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -100,7 +114,7 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -116,7 +130,7 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { @@ -136,7 +150,7 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
// For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go new file mode 100644 index 00000000..a807a04a --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int64.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt64 Value +type stringToInt64Value struct { + value *map[string]int64 + changed bool +} + +func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { + ssv := new(stringToInt64Value) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToInt64Value) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToInt64Value) Type() string { + return "stringToInt64" +} + +func (s *stringToInt64Value) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.FormatInt(v, 10)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToInt64Conv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int64{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt64 return the map[string]int64 value of a flag with the given name +func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { + val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) + if err != nil { + return map[string]int64{}, err + } + return val.(map[string]int64), nil +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
+// The value of each argument will not try to be separated by comma +func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, "", value, usage) + return &p +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, "", value, usage) +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go index edd94c60..5fa92483 100644 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ b/vendor/github.com/spf13/pflag/uint_slice.go @@ -50,6 +50,48 @@ func (s *uintSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *uintSliceValue) fromString(val string) (uint, error) { + t, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return 0, err + } + return uint(t), nil +} + +func (s *uintSliceValue) toString(val uint) string { + return fmt.Sprintf("%d", val) +} + +func (s *uintSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *uintSliceValue) Replace(val []string) error { + out := make([]uint, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *uintSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func uintSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index 2f04ee5b..d1b4fca3 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -7,6 +7,7 @@ package terminal import ( "bytes" "io" + "runtime" "strconv" "sync" "unicode/utf8" @@ -939,6 +940,8 @@ func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { // readPasswordLine reads from reader until it finds \n or io.EOF. // The slice returned does not include the \n. // readPasswordLine also ignores any \r it finds. +// Windows uses \r as end of line. So, on Windows, readPasswordLine +// reads until it finds \r and ignores any \n it finds during processing. 
func readPasswordLine(reader io.Reader) ([]byte, error) { var buf [1]byte var ret []byte @@ -947,10 +950,20 @@ func readPasswordLine(reader io.Reader) ([]byte, error) { n, err := reader.Read(buf[:]) if n > 0 { switch buf[0] { + case '\b': + if len(ret) > 0 { + ret = ret[:len(ret)-1] + } case '\n': - return ret, nil + if runtime.GOOS != "windows" { + return ret, nil + } + // otherwise ignore \n case '\r': - // remove \r from passwords on Windows + if runtime.GOOS == "windows" { + return ret, nil + } + // otherwise ignore \r default: ret = append(ret, buf[0]) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 5cfdf8f3..f614e9cb 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -85,8 +85,8 @@ func ReadPassword(fd int) ([]byte, error) { } old := st - st &^= (windows.ENABLE_ECHO_INPUT) - st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT) + st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT) if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { return nil, err } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index d4abeb2b..5e01ce9a 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -52,10 +52,11 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 ) var ( @@ -163,6 +164,15 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +// maxQueuedControlFrames is the maximum number of control frames like +// SETTINGS, PING and RST_STREAM that will be queued for writing before +// the connection is closed to prevent memory exhaustion attacks. +func (s *Server) maxQueuedControlFrames() int { + // TODO: if anybody asks, add a Server field, and remember to define the + // behavior of negative values. + return maxQueuedControlFrames +} + type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -273,7 +283,20 @@ func ConfigureServer(s *http.Server, conf *Server) error { if testHookOnConn != nil { testHookOnConn() } + // The TLSNextProto interface predates contexts, so + // the net/http package passes down its per-connection + // base context via an exported but unadvertised + // method on the Handler. This is for internal + // net/http<=>http2 use only. + var ctx context.Context + type baseContexter interface { + BaseContext() context.Context + } + if bc, ok := h.(baseContexter); ok { + ctx = bc.BaseContext() + } conf.ServeConn(c, &ServeConnOpts{ + Context: ctx, Handler: h, BaseConfig: hs, }) @@ -284,6 +307,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { // ServeConnOpts are options for the Server.ServeConn method. type ServeConnOpts struct { + // Context is the base context to use. + // If nil, context.Background is used. 
+ Context context.Context + // BaseConfig optionally sets the base configuration // for values. If nil, defaults are used. BaseConfig *http.Server @@ -294,6 +321,13 @@ type ServeConnOpts struct { Handler http.Handler } +func (o *ServeConnOpts) context() context.Context { + if o.Context != nil { + return o.Context + } + return context.Background() +} + func (o *ServeConnOpts) baseConfig() *http.Server { if o != nil && o.BaseConfig != nil { return o.BaseConfig @@ -439,7 +473,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(opts.context()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) if hs := opts.baseConfig(); hs != nil { ctx = context.WithValue(ctx, http.ServerContextKey, hs) @@ -482,6 +516,7 @@ type serverConn struct { sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? + queuedControlFrames int // control frames in the writeSched queue clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client @@ -870,6 +905,14 @@ func (sc *serverConn) serve() { } } + // If the peer is causing us to generate a lot of control frames, + // but not reading them from us, assume they are trying to make us + // run out of memory. + if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + sc.vlogf("http2: too many control frames in send queue, closing connection") + return + } + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY // with no error code (graceful shutdown), don't start the timer until // all open streams have been completed. @@ -1069,6 +1112,14 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) { } if !ignoreWrite { + if wr.isControl() { + sc.queuedControlFrames++ + // For extra safety, detect wraparounds, which should not happen, + // and pull the plug. + if sc.queuedControlFrames < 0 { + sc.conn.Close() + } + } sc.writeSched.Push(wr) } sc.scheduleFrameWrite() @@ -1186,10 +1237,8 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // If a frame is already being written, nothing happens. This will be called again // when the frame is done being written. // -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. +// If a frame isn't being written and we need to send one, the best frame +// to send is selected by writeSched. // // If a frame isn't being written and there's nothing else to send, we // flush the write buffer. 
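
Note (illustration only, not part of the vendored patch): the hunks above add a `Context` field to `http2.ServeConnOpts` so per-connection contexts can derive from a caller-supplied base context. A minimal sketch of how a caller might use it; the `serveOne` function name and the handler/connection wiring are assumptions for the example:

```go
package example

import (
	"context"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// serveOne handles a single already-accepted connection, passing a
// caller-controlled base context via ServeConnOpts.Context (the field
// added by the hunk above).
func serveOne(c net.Conn, handler http.Handler) {
	ctx, cancel := context.WithCancel(context.Background()) // assumed base context
	defer cancel()

	srv := &http2.Server{}
	srv.ServeConn(c, &http2.ServeConnOpts{
		Context: ctx,
		Handler: handler,
	})
}
```
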
@@ -1217,6 +1266,9 @@ func (sc *serverConn) scheduleFrameWrite() { } if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { if wr, ok := sc.writeSched.Pop(); ok { + if wr.isControl() { + sc.queuedControlFrames-- + } sc.startFrameWrite(wr) continue } @@ -1509,6 +1561,8 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { if err := f.ForeachSetting(sc.processSetting); err != nil { return err } + // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be + // acknowledged individually, even if multiple are received before the ACK. sc.needToSendSettingsAck = true sc.scheduleFrameWrite() return nil diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index c0c80d89..aeac7d8a 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -992,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // See: https://zlib.net/zlib_faq.html#faq39 // // Note that we don't request this for HEAD requests, // due to a bug in nginx: diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 4fe30730..f24d2b1e 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -32,7 +32,7 @@ type WriteScheduler interface { // Pop dequeues the next frame to write. Returns false if no frames can // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. + // order they are Push'd. No frames should be discarded except by CloseStream. Pop() (wr FrameWriteRequest, ok bool) } @@ -76,6 +76,12 @@ func (wr FrameWriteRequest) StreamID() uint32 { return wr.stream.id } +// isControl reports whether wr is a control frame for MaxQueuedControlFrames +// purposes. That includes non-stream frames and RST_STREAM frames. +func (wr FrameWriteRequest) isControl() bool { + return wr.stream == nil +} + // DataSize returns the number of flow control bytes that must be consumed // to write this entire frame. This is 0 for non-DATA frames. func (wr FrameWriteRequest) DataSize() int { diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index 36d7919f..9a7b9e58 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -19,7 +19,8 @@ type randomWriteScheduler struct { zero writeQueue // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. + // When a stream is idle, closed, or emptied, it's deleted + // from the map. sq map[uint32]*writeQueue // pool of empty queues for reuse. @@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { return ws.zero.shift(), true } // Iterate over all non-idle streams until finding one that can be consumed. 
- for _, q := range ws.sq { + for streamID, q := range ws.sq { if wr, ok := q.consume(math.MaxInt32); ok { + if q.empty() { + delete(ws.sq, streamID) + ws.queuePool.put(q) + } return wr, true } } diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 72afe333..14e4d5ca 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -91,9 +91,13 @@ func onesCount64(x uint64) int { const m0 = 0x5555555555555555 // 01010101 ... const m1 = 0x3333333333333333 // 00110011 ... const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - const m3 = 0x00ff00ff00ff00ff // etc. - const m4 = 0x0000ffff0000ffff + // Unused in this function, but definitions preserved for + // documentation purposes: + // + // const m3 = 0x00ff00ff00ff00ff // etc. + // const m4 = 0x0000ffff0000ffff + // // Implementation: Parallel summing of adjacent bits. // See "Hacker's Delight", Chap. 5: Counting Bits. // The following pattern shows the general approach: diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 4407c505..304016b6 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,16 +2,101 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix -import "syscall" +import "unsafe" + +// readInt returns the size-bytes unsigned integer in native byte order at offset off. +func readInt(b []byte, off, size uintptr) (u uint64, ok bool) { + if len(b) < int(off+size) { + return 0, false + } + if isBigEndian { + return readIntBE(b[off:], size), true + } + return readIntLE(b[off:], size), true +} + +func readIntBE(b []byte, size uintptr) uint64 { + switch size { + case 1: + return uint64(b[0]) + case 2: + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[1]) | uint64(b[0])<<8 + case 4: + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24 + case 8: + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 + default: + panic("syscall: readInt with unsupported size") + } +} + +func readIntLE(b []byte, size uintptr) uint64 { + switch size { + case 1: + return uint64(b[0]) + case 2: + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 + case 4: + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 + case 8: + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + default: + panic("syscall: readInt with unsupported size") + } +} // ParseDirent parses up to max directory entries in buf, // appending the names to names. It returns the number of // bytes consumed from buf, the number of entries added // to names, and the new names slice. 
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { - return syscall.ParseDirent(buf, max, names) + origlen := len(buf) + count = 0 + for max != 0 && len(buf) > 0 { + reclen, ok := direntReclen(buf) + if !ok || reclen > uint64(len(buf)) { + return origlen, count, names + } + rec := buf[:reclen] + buf = buf[reclen:] + ino, ok := direntIno(rec) + if !ok { + break + } + if ino == 0 { // File absent in directory. + continue + } + const namoff = uint64(unsafe.Offsetof(Dirent{}.Name)) + namlen, ok := direntNamlen(rec) + if !ok || namoff+namlen > uint64(len(rec)) { + break + } + name := rec[namoff : namoff+namlen] + for i, c := range name { + if c == 0 { + name = name[:i] + break + } + } + // Check for useless names before allocating a string. + if string(name) == "." || string(name) == ".." { + continue + } + max-- + count++ + names = append(names, string(name)) + } + return origlen - len(buf), count, names } diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 085df2d8..bcdb5d30 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le +// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 80d00707..5a22eca9 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -105,25 +105,25 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; netbsd_386) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4c91159c..14624b95 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -183,6 +183,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -198,6 +199,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -434,6 +436,8 @@ ccflags="$@" $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || + $2 ~ /^LO_(KEY|NAME)_SIZE$/ || + $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go index 4d5b531b..eb433205 100644 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -50,8 +50,8 @@ func main() { } // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) + valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) + b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) // Intentionally export __fds_bits field in FdSet fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go index 07f8960f..baa6ecd8 100644 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ b/vendor/golang.org/x/sys/unix/mksysnum.go @@ -139,7 +139,7 @@ func main() { text += format(name, num, proto) } case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$`) { + if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { num, proto := t.sub[1], t.sub[2] name := fmt.Sprintf("SYS_%s", t.sub[3]) text += format(name, num, proto) diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go new file mode 100644 index 00000000..3a90aa6d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix dragonfly freebsd linux netbsd openbsd + +package unix + +// ReadDirent reads directory entries from fd and writes them into buf. +func ReadDirent(fd int, buf []byte) (n int, err error) { + return Getdents(fd, buf) +} diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go new file mode 100644 index 00000000..5fdae40b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin + +package unix + +import "unsafe" + +// ReadDirent reads directory entries from fd and writes them into buf. +func ReadDirent(fd int, buf []byte) (n int, err error) { + // Final argument is (basep *uintptr) and the syscall doesn't take nil. + // 64 bits should be enough. (32 bits isn't even on 386). Since the + // actual system call is getdirentries64, 64 is a good guess. + // TODO(rsc): Can we use a single global basep for all calls? + var base = (*uintptr)(unsafe.Pointer(new(uint64))) + return Getdirentries(fd, buf, base) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 45e12fb8..1aa065f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -280,8 +280,24 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return -1, ENOSYS } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true +} + //sys getdirent(fd int, buf []byte) (n int, err error) -func ReadDirent(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 33c8b5f0..97a8eef6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -63,15 +63,6 @@ func Setgroups(gids []int) (err error) { return setgroups(len(a), &a[0]) } -func ReadDirent(fd int, buf []byte) (n int, err error) { - // Final argument is (basep *uintptr) and the syscall doesn't take nil. - // 64 bits should be enough. (32 bits isn't even on 386). Since the - // actual system call is getdirentries64, 64 is a good guess. - // TODO(rsc): Can we use a single global basep for all calls? - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) -} - // Wait status is 7 bits at bottom, either 0 (exited), // 0x7F (stopped), or a signal number that caused an exit. // The 0x80 bit is whether there was a core dump. 
@@ -86,6 +77,7 @@ const ( shift = 8 exited = 0 + killed = 9 stopped = 0x7F ) @@ -112,6 +104,8 @@ func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP } +func (w WaitStatus) Killed() bool { return w&mask == killed && syscall.Signal(w>>shift) != SIGKILL } + func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP } func (w WaitStatus) StopSignal() syscall.Signal { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 21200918..3e1cdfb5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -77,7 +77,18 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 489726fa..cd8be182 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 914b89bd..d0d07243 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 4a284cf5..01e8a38a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -8,6 +8,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 52dcd88f..e674f81d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -10,6 +10,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go 
b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 962eee30..260a400f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -57,6 +57,22 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + namlen, ok := direntNamlen(buf) + if !ok { + return 0, false + } + return (16 + namlen + 1 + 7) &^ 7, true +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -269,6 +285,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) +//sys Getdents(fd int, buf []byte) (n int, err error) //sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index f135812a..329d240b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -82,6 +82,18 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + func Pipe(p []int) (err error) { return Pipe2(p, 0) } @@ -362,7 +374,21 @@ func Getdents(fd int, buf []byte) (n int, err error) { func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { if supportsABI(_ino64First) { - return getdirentries_freebsd12(fd, buf, basep) + if basep == nil || unsafe.Sizeof(*basep) == 8 { + return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) + } + // The freebsd12 syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries_freebsd12(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO + } + return } // The old syscall entries are smaller than the new. 
Use 1/4 of the original @@ -507,6 +533,70 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +//sys ptrace(request int, pid int, addr uintptr, data int) (err error) + +func PtraceAttach(pid int) (err error) { + return ptrace(PTRACE_ATTACH, pid, 0, 0) +} + +func PtraceCont(pid int, signal int) (err error) { + return ptrace(PTRACE_CONT, pid, 1, signal) +} + +func PtraceDetach(pid int) (err error) { + return ptrace(PTRACE_DETACH, pid, 1, 0) +} + +func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { + return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) +} + +func PtraceGetFsBase(pid int, fsbase *int64) (err error) { + return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) +} + +func PtraceGetRegs(pid int, regsout *Reg) (err error) { + return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) +} + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} + +func PtraceLwpEvents(pid int, enable int) (err error) { + return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) +} + +func PtraceLwpInfo(pid int, info uintptr) (err error) { + return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +} + +func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { + return PtraceIO(PIOD_READ_D, pid, addr, out, SizeofLong) +} + +func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) { + return PtraceIO(PIOD_READ_I, pid, addr, out, SizeofLong) +} + +func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) { + return PtraceIO(PIOD_WRITE_D, pid, addr, data, SizeofLong) +} + +func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { + return PtraceIO(PIOD_WRITE_I, pid, addr, data, SizeofLong) +} + +func PtraceSetRegs(pid int, regs *Reg) (err error) { + return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) +} + +func PtraceSingleStep(pid int) (err error) { + return ptrace(PTRACE_SINGLESTEP, pid, 1, 0) +} + /* * Exposed directly */ @@ -555,7 +645,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index c92545ea..637b5017 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,7 +13,6 @@ package unix import ( "encoding/binary" - "net" "runtime" "syscall" "unsafe" @@ -765,7 +764,7 @@ const px_proto_oe = 0 type SockaddrPPPoE struct { SID uint16 - Remote net.HardwareAddr + Remote []byte Dev string raw RawSockaddrPPPoX } @@ -916,7 +915,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } sa := &SockaddrPPPoE{ SID: binary.BigEndian.Uint16(pp[6:8]), - 
Remote: net.HardwareAddr(pp[8:14]), + Remote: pp[8:14], } for i := 14; i < 14+IFNAMSIZ; i++ { if pp[i] == 0 { @@ -1414,8 +1413,20 @@ func Reboot(cmd int) (err error) { return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "") } -func ReadDirent(fd int, buf []byte) (n int, err error) { - return Getdents(fd, buf) +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } //sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) @@ -1450,6 +1461,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Acct(path string) (err error) //sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) //sys Adjtimex(buf *Timex) (state int, err error) +//sys Capget(hdr *CapUserHeader, data *CapUserData) (err error) +//sys Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) @@ -1755,8 +1768,6 @@ func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err erro // Alarm // ArchPrctl // Brk -// Capget -// Capset // ClockNanosleep // ClockSettime // Clone diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 5240e16e..5ef30904 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -94,6 +94,18 @@ func nametomib(name string) (mib []_C_int, err error) { return mib, nil } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + func SysctlClockinfo(name string) (*Clockinfo, error) { mib, err := sysctlmib(name) if err != nil { @@ -120,9 +132,30 @@ func Pipe(p []int) (err error) { return } -//sys getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return getdents(fd, buf) + n, err = Getdents(fd, buf) + if err != nil || basep == nil { + return + } + + var off int64 + off, err = Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + *basep = ^uintptr(0) + return + } + *basep = uintptr(off) + if unsafe.Sizeof(*basep) == 8 { + return + } + if off>>32 != 0 { + // We can't stuff the offset back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. 
+ err = EIO + } + return } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index c8648ec0..1a074b2f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -43,6 +43,18 @@ func nametomib(name string) (mib []_C_int, err error) { return nil, EINVAL } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) +} + func SysctlClockinfo(name string) (*Clockinfo, error) { mib, err := sysctlmib(name) if err != nil { @@ -89,9 +101,30 @@ func Pipe(p []int) (err error) { return } -//sys getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return getdents(fd, buf) + n, err = Getdents(fd, buf) + if err != nil || basep == nil { + return + } + + var off int64 + off, err = Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + *basep = ^uintptr(0) + return + } + *basep = uintptr(off) + if unsafe.Sizeof(*basep) == 8 { + return + } + if off>>32 != 0 { + // We can't stuff the offset back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO was allowed by getdirentries. + err = EIO + } + return } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index e4780127..0153a316 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -35,6 +35,22 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true +} + //sysnb pipe(p *[2]_C_int) (n int, err error) func Pipe(p []int) (err error) { @@ -189,6 +205,7 @@ func Setgroups(gids []int) (err error) { return setgroups(len(a), &a[0]) } +// ReadDirent reads directory entries from fd and writes them into buf. func ReadDirent(fd int, buf []byte) (n int, err error) { // Final argument is (basep *uintptr) and the syscall doesn't take nil. // TODO(rsc): Can we use a single global basep for all calls? 
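
Note (illustration only, not part of the vendored patch): the x/sys/unix hunks above rework `ReadDirent` into per-OS `Getdents`/`Getdirentries` wrappers and reimplement `ParseDirent` in Go. A minimal sketch of the public API those changes back, assuming a Unix build target; `listDir` is a hypothetical helper for the example:

```go
package example

import "golang.org/x/sys/unix"

// listDir returns the entry names in path, excluding "." and "..",
// using the ReadDirent/ParseDirent pair reworked by the hunks above.
func listDir(path string) ([]string, error) {
	fd, err := unix.Open(path, unix.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer unix.Close(fd)

	var names []string
	buf := make([]byte, 4096)
	for {
		n, err := unix.ReadDirent(fd, buf)
		if err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}
		// ParseDirent skips "." and ".." and appends to names.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	return names, nil
}
```
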
diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go index 74707989..a121dc33 100644 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -243,11 +243,55 @@ const ( // Ptrace requests const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL + PTRACE_ATTACH = C.PT_ATTACH + PTRACE_CONT = C.PT_CONTINUE + PTRACE_DETACH = C.PT_DETACH + PTRACE_GETFPREGS = C.PT_GETFPREGS + PTRACE_GETFSBASE = C.PT_GETFSBASE + PTRACE_GETLWPLIST = C.PT_GETLWPLIST + PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS + PTRACE_GETREGS = C.PT_GETREGS + PTRACE_GETXSTATE = C.PT_GETXSTATE + PTRACE_IO = C.PT_IO + PTRACE_KILL = C.PT_KILL + PTRACE_LWPEVENTS = C.PT_LWP_EVENTS + PTRACE_LWPINFO = C.PT_LWPINFO + PTRACE_SETFPREGS = C.PT_SETFPREGS + PTRACE_SETREGS = C.PT_SETREGS + PTRACE_SINGLESTEP = C.PT_STEP + PTRACE_TRACEME = C.PT_TRACE_ME ) +const ( + PIOD_READ_D = C.PIOD_READ_D + PIOD_WRITE_D = C.PIOD_WRITE_D + PIOD_READ_I = C.PIOD_READ_I + PIOD_WRITE_I = C.PIOD_WRITE_I +) + +const ( + PL_FLAG_BORN = C.PL_FLAG_BORN + PL_FLAG_EXITED = C.PL_FLAG_EXITED + PL_FLAG_SI = C.PL_FLAG_SI +) + +const ( + TRAP_BRKPT = C.TRAP_BRKPT + TRAP_TRACE = C.TRAP_TRACE +) + +type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo + +type __Siginfo C.struct___siginfo + +type Sigset_t C.sigset_t + +type Reg C.struct_reg + +type FpReg C.struct_fpreg + +type PtraceIoDesc C.struct_ptrace_io_desc + // Events (kqueue, kevent) type Kevent_t C.struct_kevent_freebsd11 diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go index 2dd4f954..4a96d72c 100644 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -254,6 +254,7 @@ type Ptmget C.struct_ptmget const ( AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go index 8aafbe44..775cb57d 100644 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -241,6 +241,7 @@ type Winsize C.struct_winsize const ( AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 881e69f1..3fb475bc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 
0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -669,6 +722,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -934,6 +988,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1096,6 +1151,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1958,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2165,6 +2238,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2384,6 +2458,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 039b007d..9c4e19f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -669,6 +722,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -934,6 +988,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1096,6 +1151,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + 
LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1959,6 +2028,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2166,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2385,6 +2459,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 97ed569a..a1f038c0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 
CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1965,6 +2034,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2172,6 +2245,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2391,6 +2465,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d47f3ba6..504ce138 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + 
CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -489,6 +541,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -671,6 +724,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -936,6 +990,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1098,6 +1153,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1949,6 +2018,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2157,6 +2230,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2376,6 +2450,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0ae030ee..58b64290 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 
+245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1958,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2166,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2386,6 +2460,7 @@ const ( TS_COMM_LEN 
= 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 91b49ddd..35e33de6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB 
= 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1958,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2166,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2386,6 +2460,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 7f1ef04e..574fcd8c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + 
CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1958,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2166,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2386,6 +2460,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 724a244f..cdf0cf5f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 
BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1958,6 +2027,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2166,6 +2239,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2386,6 +2460,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 25044629..eefdb328 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + 
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0xff CBAUDEX = 0x0 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2016,6 +2085,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + 
SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2223,6 +2296,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2446,6 +2520,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e7c49911..78db2104 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0xff CBAUDEX = 0x0 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 
0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2016,6 +2085,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2223,6 +2296,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2446,6 +2520,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 0373d65a..0cd07f93 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + 
CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1946,6 +2015,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2153,6 +2226,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2372,6 +2446,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index b2ed7ee6..ac4f1d9f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 
BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -334,6 +346,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -372,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -488,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -668,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -933,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1095,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2019,6 +2088,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2226,6 +2299,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2445,6 +2519,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 
0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 58067c52..8a12f141 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -199,6 +199,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -220,6 +222,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -241,16 +248,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -293,8 +303,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -337,6 +349,45 @@ const ( CAN_SFF_MASK = 0x7ff CAN_TP16 = 0x3 CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf @@ -375,6 +426,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -492,6 +544,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -672,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x1 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -937,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1099,6 +1154,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 
0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2011,6 +2080,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2218,6 +2291,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2434,6 +2508,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index c4ec7ff8..dd5ea36e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -377,16 +377,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1691,6 +1681,16 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 23346dc6..78ca9233 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list 
unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -2341,6 +2326,21 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 37b85b4f..f40465ca 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -264,6 +262,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c142e33e..64df03c4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -2356,6 +2341,21 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 
0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 1a391519..debcb8ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -266,6 +264,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 01cffbf4..ed330623 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 994056f3..66af9f48 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 8f2691de..5258a732 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 61dc0d4c..f57f48f8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index ae9f1a21..cdfe9318 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -749,6 +749,23 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 80903e47..a783306b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index cd250ff0..f995520d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 290a9c2c..d681acd4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index c6df9d2e..5049b2ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -404,6 +404,16 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep 
*uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 81d90a27..c5e46e4c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 0c184586..da8819e4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 18ef8a62..6ad9be6d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 2fba25d0..f8833178 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index c330f4ff..8eebc6c7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 8e9e0098..ecf62a67 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index c22d6260..1ba0f7b6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), 
uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 700a99e9..20012b2f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index cec4c106..2b520dea 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 677ef5a6..d9f044c9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err 
= errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 565034c5..9feed65e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 7feb2c6b..0a651508 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 07655c45..e27f6693 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -408,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 642db767..7e058266 
100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 59585fee..d94d076a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 6ec31434..cf5bf3d0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 603d1443..243a9317 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 6a489fac..a9532d07 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 30cba434..0cb9f017 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index fa1beda3..6fc99b54 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index eb589904..27878a72 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 55c3a329..9474974b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + 
SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( 
const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index b39be6cb..48a7beae 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } 
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int 
utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 44ffd4ce..4a6dfd4a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 
314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + 
SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 9f21e955..3e51af8e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build arm64,freebsd @@ -7,13 +7,13 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } SYS_LINK = 9 // { int link(char *path, char *link); } SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } @@ -21,20 +21,20 @@ const ( SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } SYS_SETUID = 23 // { int setuid(uid_t uid); } SYS_GETUID = 24 // { uid_t getuid(void); } SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ - SYS_ACCEPT = 30 // { int accept(int s, \ - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } SYS_ACCESS = 33 // { int access(char *path, int amode); } SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } SYS_FCHFLAGS = 35 // { 
int fchflags(int fd, u_long flags); } @@ -42,56 +42,57 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } SYS_REBOOT = 55 // { int reboot(int opt); } SYS_REVOKE = 56 // { int revoke(char *path); } SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } SYS_GETPGRP = 81 // { int getpgrp(void); } SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, 
fd_set *in, fd_set *ou, \ + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } @@ -99,24 +100,24 @@ const ( SYS_RENAME = 128 // { int rename(char *from, char *to); } SYS_FLOCK = 131 // { int flock(int fd, int how); } SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, 
struct fhandle *fhp); } + SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \ - SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \ - SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \ + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } + SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -127,269 +128,269 @@ const ( SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } SYS_UNDELETE = 205 // { int undelete(char *path); } SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \ - SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } - SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \ - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \ - SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \ + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } - SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } + SYS_CLOCK_GETTIME = 232 // { 
int clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); } SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \ - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t 
offset); } + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( \ - SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \ + SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } SYS_SCHED_YIELD = 331 // { int sched_yield (void); } SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - 
SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); } + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } SYS___SETUGID = 374 // { int __setugid(int flag); } SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } - SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \ - SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \ + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t 
extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } + SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } + SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, 
size_t nbytes); } + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } + SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } SYS_GETAUID = 447 // { int getauid(uid_t *auid); } SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \ - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \ - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \ - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \ - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \ + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } - SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ - SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t 
offset, \ + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); } + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, 
\ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \ - SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \ - SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \ + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } SYS_CAP_ENTER = 516 // { int cap_enter(void); } SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, 
\ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); } + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } + SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, \ - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, \ + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } + SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } + SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 33b6e4d1..e869c060 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -423,4 +423,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + 
SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 9ba20784..4917b8ab 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -345,4 +345,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 94f68f10..f85fcb4f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 15c41351..678a119b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -290,4 +290,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 638465b1..222c9f9a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -408,4 +408,10 @@ const ( SYS_IO_URING_SETUP = 4425 SYS_IO_URING_ENTER = 4426 SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT = 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 57ec82aa..28e6d0e9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -338,4 +338,10 @@ const ( SYS_IO_URING_SETUP = 5425 SYS_IO_URING_ENTER = 5426 SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 825a3e3b..e643c6f6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -338,4 +338,10 @@ const ( SYS_IO_URING_SETUP = 5425 SYS_IO_URING_ENTER = 5426 SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index f152dfdd..01d93c42 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -408,4 +408,10 @@ const ( SYS_IO_URING_SETUP = 4425 SYS_IO_URING_ENTER = 4426 SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT 
= 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 7cbe78b1..5744149e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 51a2f123..21c83204 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 323432ae..c1bb6d8f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -289,4 +289,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 9dca9748..bc3cc6b5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -352,4 +352,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index d3da46f0..0a2841ba 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -367,4 +367,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 0edc5409..7312e95f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -324,11 +324,108 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + PTRACE_GETXSTATE = 0x45 + PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 ) +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI 
= 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + X_reason [32]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + Fs uint32 + Es uint32 + Ds uint32 + Edi uint32 + Esi uint32 + Ebp uint32 + Isp uint32 + Ebx uint32 + Edx uint32 + Ecx uint32 + Eax uint32 + Trapno uint32 + Err uint32 + Eip uint32 + Cs uint32 + Eflags uint32 + Esp uint32 + Ss uint32 + Gs uint32 +} + +type FpReg struct { + Env [7]uint32 + Acc [8][10]uint8 + Ex_sw uint32 + Pad [64]uint8 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint32 Filter int16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 8881ce84..29ba2f5b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -322,11 +322,115 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + PTRACE_GETXSTATE = 0x45 + PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 ) +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI = 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + R15 int64 + R14 int64 + R13 int64 + R12 int64 + R11 int64 + R10 int64 + R9 int64 + R8 int64 + Rdi int64 + Rsi int64 + Rbp int64 + Rbx int64 + Rdx int64 + Rcx int64 + Rax int64 + Trapno uint32 + Fs uint16 + Gs uint16 + Err uint32 + Es uint16 + Ds uint16 + Rip int64 + Cs int64 + Rflags int64 + Rsp int64 + Ss int64 +} + +type FpReg struct { + Env [4]uint64 + Acc [8][16]uint8 + Xacc [16][16]uint8 + Spare [12]uint64 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint64 Filter int16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index fc713999..b4090ef3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -322,11 +322,92 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + 
PTRACE_GETXSTATE = 0x45 + PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 ) +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI = 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + X_reason [32]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + R [13]uint32 + R_sp uint32 + R_lr uint32 + R_pc uint32 + R_cpsr uint32 +} + +type FpReg struct { + Fpr_fpsr uint32 + Fpr [8][3]uint32 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint32 Filter int16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 5a0753ee..1542a877 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -322,11 +322,93 @@ const ( ) const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 + PTRACE_ATTACH = 0xa + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0xb + PTRACE_GETFPREGS = 0x23 + PTRACE_GETFSBASE = 0x47 + PTRACE_GETLWPLIST = 0xf + PTRACE_GETNUMLWPS = 0xe + PTRACE_GETREGS = 0x21 + PTRACE_GETXSTATE = 0x45 + PTRACE_IO = 0xc + PTRACE_KILL = 0x8 + PTRACE_LWPEVENTS = 0x18 + PTRACE_LWPINFO = 0xd + PTRACE_SETFPREGS = 0x24 + PTRACE_SETREGS = 0x22 + PTRACE_SINGLESTEP = 0x9 + PTRACE_TRACEME = 0x0 ) +const ( + PIOD_READ_D = 0x1 + PIOD_WRITE_D = 0x2 + PIOD_READ_I = 0x3 + PIOD_WRITE_I = 0x4 +) + +const ( + PL_FLAG_BORN = 0x100 + PL_FLAG_EXITED = 0x200 + PL_FLAG_SI = 0x20 +) + +const ( + TRAP_BRKPT = 0x1 + TRAP_TRACE = 0x2 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + X_reason [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + X [30]uint64 + Lr uint64 + Sp uint64 + Elr uint64 + Spsr uint32 +} + +type FpReg struct { + Fp_q [32]uint128 + Fp_sr uint32 + Fp_cr uint32 +} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint +} + type Kevent_t struct { Ident uint64 Filter int16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 06e3a3f4..50bc4128 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2467,3 +2467,57 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY 
= 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index cef25e73..055eaa76 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2480,3 +2480,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index c4369361..66019c9c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2458,3 +2458,57 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]uint8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 76c55e05..3104798c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2459,3 +2459,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type 
CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 4302d574..46c86021 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2464,3 +2464,57 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 7ea742be..c2fe1a62 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2461,3 +2461,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 8f2b8ad4..f1eb0d39 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2461,3 +2461,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 865bf57d..8759bc36 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2464,3 +2464,57 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 2b68027d..a8120054 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2469,3 +2469,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key 
[32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 76cd7e64..74b7a919 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2469,3 +2469,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index f99f0615..ccea3e63 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -808,6 +808,7 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } @@ -2486,3 +2487,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d9d03ae4..d8fc0bc1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2483,3 +2483,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + 
Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint64 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index b247fe94..5e0ab932 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2464,3 +2464,58 @@ const ( BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index a2268b4f..86736ab6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -411,6 +411,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 59e1da0a..3427811f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -418,6 +418,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 1f1f0f38..399f37a4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -416,6 +416,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 8dca204a..32f0c15d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -418,6 
+418,7 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x200 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 900fb446..61ea0019 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -436,6 +436,7 @@ type Winsize struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 AT_SYMLINK_NOFOLLOW = 0x2 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 028fa78d..87a493f6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -436,6 +436,7 @@ type Winsize struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 AT_SYMLINK_NOFOLLOW = 0x2 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index b45d5eed..d80836ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -437,6 +437,7 @@ type Winsize struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 AT_SYMLINK_NOFOLLOW = 0x2 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index fa369a32..4e158746 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -430,6 +430,7 @@ type Winsize struct { const ( AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 AT_SYMLINK_NOFOLLOW = 0x2 ) diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash index f5c6888b..2163843a 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -13,7 +13,7 @@ winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/* declare -A errors { - echo "// Code generated by 'go generate'; DO NOT EDIT." + echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." echo echo "package windows" echo "import \"syscall\"" diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash new file mode 100644 index 00000000..ab8924e9 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" +[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } + +{ + echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." 
+ echo + echo "package windows" + echo "type KNOWNFOLDERID GUID" + echo "var (" + while read -r line; do + [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue + printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ + "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ + $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ + $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) + done < "$knownfolders" + echo ")" +} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 9a59b42f..847e00bc 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -159,6 +159,10 @@ type SERVICE_DESCRIPTION struct { Description *uint16 } +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + type SERVICE_STATUS_PROCESS struct { ServiceType uint32 CurrentState uint32 @@ -200,12 +204,19 @@ type SC_ACTION struct { Delay uint32 } +type QUERY_SERVICE_LOCK_STATUS struct { + IsLocked uint32 + LockOwner *uint16 + LockDuration uint32 +} + //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW //sys DeleteService(service Handle) (err error) = advapi32.DeleteService //sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW //sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW //sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService //sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW //sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index c698da33..b2305092 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,6 +10,7 @@ import ( errorspkg "errors" "sync" "syscall" + "time" "unicode/utf16" "unsafe" ) @@ -171,8 +172,9 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles 
bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) +//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) = shell32.ShellExecuteW +//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) //sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW @@ -194,6 +196,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock +//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -232,7 +235,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW //sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW //sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW -//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo @@ -241,6 +244,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW //sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) //sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) // This 
function returns 1 byte BOOLEAN rather than the 4 byte BOOL. //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW @@ -262,6 +267,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass //sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) +//sys GetProcessId(process Handle) (id uint32, err error) +//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -284,6 +291,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW //sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW //sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW +//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString +//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 +//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid +//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree +//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion // syscall interface implementation for other packages @@ -498,6 +510,10 @@ func ComputerName() (name string, err error) { return string(utf16.Decode(b[0:n])), nil } +func DurationSinceBoot() time.Duration { + return time.Duration(getTickCount64()) * time.Millisecond +} + func Ftruncate(fd Handle, length int64) (err error) { curoffset, e := Seek(fd, 0, 1) if e != nil { @@ -1107,7 +1123,7 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } -func Getpid() (pid int) { return int(getCurrentProcessId()) } +func Getpid() (pid int) { return int(GetCurrentProcessId()) } func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { // NOTE(rsc): The Win32finddata struct is wrong for the system call: @@ -1235,3 +1251,70 @@ func Readlink(path string, buf []byte) (n int, err error) { return n, nil } + +// GUIDFromString parses a string in the form of +// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. +func GUIDFromString(str string) (GUID, error) { + guid := GUID{} + str16, err := syscall.UTF16PtrFromString(str) + if err != nil { + return guid, err + } + err = clsidFromString(str16, &guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// GenerateGUID creates a new random GUID. +func GenerateGUID() (GUID, error) { + guid := GUID{} + err := coCreateGuid(&guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// String returns the canonical string form of the GUID, +// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". 
+func (guid GUID) String() string { + var str [100]uint16 + chars := stringFromGUID2(&guid, &str[0], int32(len(str))) + if chars <= 1 { + return "" + } + return string(utf16.Decode(str[:chars-1])) +} + +// KnownFolderPath returns a well-known folder path for the current user, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + return Token(0).KnownFolderPath(folderID, flags) +} + +// KnownFolderPath returns a well-known folder path for the user token, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + var p *uint16 + err := shGetKnownFolderPath(folderID, flags, t, &p) + if err != nil { + return "", err + } + defer CoTaskMemFree(unsafe.Pointer(p)) + return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil +} + +// RtlGetVersion returns the true version of the underlying operating system, ignoring +// any manifesting or compatibility layers on top of the win32 layer. +func RtlGetVersion() *OsVersionInfoEx { + info := &OsVersionInfoEx{} + info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) + // According to documentation, this function always succeeds. + // The function doesn't even check the validity of the + // osVersionInfoSize member. Disassembling ntdll.dll indicates + // that the documentation is indeed correct about that. + _ = rtlGetVersion(info) + return info +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 99b85f6d..1e3947f0 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -158,17 +158,50 @@ const ( WAIT_OBJECT_0 = 0x00000000 WAIT_FAILED = 0xFFFFFFFF - PROCESS_TERMINATE = 1 - PROCESS_QUERY_INFORMATION = 0x00000400 - SYNCHRONIZE = 0x00100000 + // Standard access rights. + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + SYNCHRONIZE = 0x00100000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + + // Access rights for process. + PROCESS_CREATE_PROCESS = 0x0080 + PROCESS_CREATE_THREAD = 0x0002 + PROCESS_DUP_HANDLE = 0x0040 + PROCESS_QUERY_INFORMATION = 0x0400 + PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 + PROCESS_SET_INFORMATION = 0x0200 + PROCESS_SET_QUOTA = 0x0100 + PROCESS_SUSPEND_RESUME = 0x0800 + PROCESS_TERMINATE = 0x0001 + PROCESS_VM_OPERATION = 0x0008 + PROCESS_VM_READ = 0x0010 + PROCESS_VM_WRITE = 0x0020 + + // Access rights for thread. + THREAD_DIRECT_IMPERSONATION = 0x0200 + THREAD_GET_CONTEXT = 0x0008 + THREAD_IMPERSONATE = 0x0100 + THREAD_QUERY_INFORMATION = 0x0040 + THREAD_QUERY_LIMITED_INFORMATION = 0x0800 + THREAD_SET_CONTEXT = 0x0010 + THREAD_SET_INFORMATION = 0x0020 + THREAD_SET_LIMITED_INFORMATION = 0x0400 + THREAD_SET_THREAD_TOKEN = 0x0080 + THREAD_SUSPEND_RESUME = 0x0002 + THREAD_TERMINATE = 0x0001 FILE_MAP_COPY = 0x01 FILE_MAP_WRITE = 0x02 FILE_MAP_READ = 0x04 FILE_MAP_EXECUTE = 0x20 - CTRL_C_EVENT = 0 - CTRL_BREAK_EVENT = 1 + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + CTRL_CLOSE_EVENT = 2 + CTRL_LOGOFF_EVENT = 5 + CTRL_SHUTDOWN_EVENT = 6 // Windows reserves errors >= 1<<29 for application use. 
APPLICATION_ERROR = 1 << 29 @@ -629,6 +662,16 @@ type ProcessEntry32 struct { ExeFile [MAX_PATH]uint16 } +type ThreadEntry32 struct { + Size uint32 + Usage uint32 + ThreadID uint32 + OwnerProcessID uint32 + BasePri int32 + DeltaPri int32 + Flags uint32 +} + type Systemtime struct { Year uint16 Month uint16 @@ -1590,3 +1633,36 @@ const ( JobObjectNotificationLimitInformation2 = 34 JobObjectSecurityLimitInformation = 5 ) + +const ( + KF_FLAG_DEFAULT = 0x00000000 + KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 + KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 + KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 + KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 + KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 + KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 + KF_FLAG_CREATE = 0x00008000 + KF_FLAG_DONT_VERIFY = 0x00004000 + KF_FLAG_DONT_UNEXPAND = 0x00002000 + KF_FLAG_NO_ALIAS = 0x00001000 + KF_FLAG_INIT = 0x00000800 + KF_FLAG_DEFAULT_PATH = 0x00000400 + KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 + KF_FLAG_SIMPLE_IDLIST = 0x00000100 + KF_FLAG_ALIAS_ONLY = 0x80000000 +) + +type OsVersionInfoEx struct { + osVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformId uint32 + CsdVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + _ byte +} diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go index 2b4cea5b..f0212003 100644 --- a/vendor/golang.org/x/sys/windows/zerrors_windows.go +++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go @@ -1,4 +1,4 @@ -// Code generated by 'go generate'; DO NOT EDIT. +// Code generated by 'mkerrors.bash'; DO NOT EDIT. package windows diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go new file mode 100644 index 00000000..6048ac67 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go @@ -0,0 +1,149 @@ +// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. 
+ +package windows + +type KNOWNFOLDERID GUID + +var ( + FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} + FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} + FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} + FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} + FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} + FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} + FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} + FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} + FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} + FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} + FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} + FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} + FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} + FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} + FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} + FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} + FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} + FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} + FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} + FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} + FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} + FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} + FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} + FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} + FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} + FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} + FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} + FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 
0x49, 0x5d, 0x97}} + FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} + FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} + FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} + FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} + FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} + FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} + FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} + FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} + FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} + FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} + FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} + FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} + FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} + FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} + FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} + FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} + FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} + FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} + FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} + FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} + FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} + FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} + FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} + FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} + FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} + FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} + FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} + FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 
0xff}} + FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} + FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} + FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} + FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} + FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} + FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} + FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} + FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} + FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} + FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} + FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} + FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} + FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} + FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} + FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} + FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} + FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} + FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} + FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} + FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} + FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} + FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} + FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} + FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} + FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} + FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} + FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} + FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} + 
FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} + FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} + FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} + FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} + FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} + FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} + FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} + FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} + FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} + FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} + FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} + FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} + FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} + FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} + FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} + FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} + FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} + FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} + FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} + FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} + FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} + FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} + FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} + FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} + FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} + FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} + FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} + FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 
0x26, 0x20, 0xb8, 0x6f}} + FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} + FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} + FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} + FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} + FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} + FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} + FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} + FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} + FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} + FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} + FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} + FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} + FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} + FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} + FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} + FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} + FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} + FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} + FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} + FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} + FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}} + FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} + FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} + FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} + FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} + FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 
0x42, 0xac, 0x53, 0x6c, 0xc9}} + FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index dcd10e06..d461bed9 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -42,6 +42,8 @@ var ( modmswsock = NewLazySystemDLL("mswsock.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moduser32 = NewLazySystemDLL("user32.dll") + modole32 = NewLazySystemDLL("ole32.dll") + modntdll = NewLazySystemDLL("ntdll.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") @@ -59,6 +61,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procStartServiceW = modadvapi32.NewProc("StartServiceW") procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") procControlService = modadvapi32.NewProc("ControlService") procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") @@ -112,6 +115,7 @@ var ( procCreateProcessW = modkernel32.NewProc("CreateProcessW") procOpenProcess = modkernel32.NewProc("OpenProcess") procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procTerminateProcess = modkernel32.NewProc("TerminateProcess") procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") @@ -133,6 +137,7 @@ var ( procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") procSetFileTime = modkernel32.NewProc("SetFileTime") procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") @@ -180,6 +185,8 @@ var ( procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procProcess32FirstW = modkernel32.NewProc("Process32FirstW") procProcess32NextW = modkernel32.NewProc("Process32NextW") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") @@ -200,6 +207,8 @@ var ( procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procOpenThread = modkernel32.NewProc("OpenThread") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") @@ -220,6 +229,11 @@ var ( procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") procMessageBoxW = 
moduser32.NewProc("MessageBoxW") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") procWSAStartup = modws2_32.NewProc("WSAStartup") procWSACleanup = modws2_32.NewProc("WSACleanup") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -418,6 +432,18 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { return } +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { @@ -1064,14 +1090,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } -func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) { +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { var _p0 uint32 if inheritHandle { _p0 = 1 } else { _p0 = 0 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid)) + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { if e1 != 0 { @@ -1095,6 +1121,14 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui return } +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func TerminateProcess(handle Handle, exitcode uint32) (err error) { r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) if r1 == 0 { @@ -1373,6 +1407,12 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { return } +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) + return +} + func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) if r1 == 0 { @@ -1815,7 +1855,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 return } -func getCurrentProcessId() (pid uint32) { +func GetCurrentProcessId() (pid uint32) { r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) pid = uint32(r0) return @@ -1918,6 +1958,30 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { return } +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), 
uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -2159,6 +2223,38 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro return } +func GetProcessId(process Handle) (id uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + id = uint32(r0) + if id == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { @@ -2399,6 +2495,41 @@ func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret return } +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) + return +} + +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + return +} + +func rtlGetVersion(info *OsVersionInfoEx) (ret error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { diff --git a/vendor/golang.org/x/text/width/gen.go b/vendor/golang.org/x/text/width/gen.go new file mode 100644 index 00000000..092277e1 --- /dev/null +++ 
b/vendor/golang.org/x/text/width/gen.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates the trie for width operations. The generated table +// includes width category information as well as the normalization mappings. +package main + +import ( + "bytes" + "fmt" + "io" + "log" + "math" + "unicode/utf8" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" +) + +// See gen_common.go for flags. + +func main() { + gen.Init() + genTables() + genTests() + gen.Repackage("gen_trieval.go", "trieval.go", "width") + gen.Repackage("gen_common.go", "common_test.go", "width") +} + +func genTables() { + t := triegen.NewTrie("width") + // fold and inverse mappings. See mapComment for a description of the format + // of each entry. Add dummy value to make an index of 0 mean no mapping. + inverse := [][4]byte{{}} + mapping := map[[4]byte]int{[4]byte{}: 0} + + getWidthData(func(r rune, tag elem, alt rune) { + idx := 0 + if alt != 0 { + var buf [4]byte + buf[0] = byte(utf8.EncodeRune(buf[1:], alt)) + s := string(r) + buf[buf[0]] ^= s[len(s)-1] + var ok bool + if idx, ok = mapping[buf]; !ok { + idx = len(mapping) + if idx > math.MaxUint8 { + log.Fatalf("Index %d does not fit in a byte.", idx) + } + mapping[buf] = idx + inverse = append(inverse, buf) + } + } + t.Insert(r, uint64(tag|elem(idx))) + }) + + w := &bytes.Buffer{} + gen.WriteUnicodeVersion(w) + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + + sz += writeMappings(w, inverse) + + fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024) + + gen.WriteVersionedGoFile(*outputFile, "width", w.Bytes()) +} + +const inverseDataComment = ` +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8.` + +func writeMappings(w io.Writer, data [][4]byte) int { + fmt.Fprintln(w, inverseDataComment) + fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data)) + for _, x := range data { + fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", x[0], x[1], x[2], x[3]) + } + fmt.Fprintln(w, "}") + return len(data) * 4 +} + +func genTests() { + w := &bytes.Buffer{} + fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n") + getWidthData(func(r rune, tag elem, alt rune) { + if alt != 0 { + fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag) + } + }) + fmt.Fprintln(w, "}") + gen.WriteGoFile("runes_test.go", "width", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go new file mode 100644 index 00000000..601e7526 --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_common.go @@ -0,0 +1,96 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This code is shared between the main code generator and the test code. + +import ( + "flag" + "log" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" +) + +var ( + outputFile = flag.String("out", "tables.go", "output file") +) + +var typeMap = map[string]elem{ + "A": tagAmbiguous, + "N": tagNeutral, + "Na": tagNarrow, + "W": tagWide, + "F": tagFullwidth, + "H": tagHalfwidth, +} + +// getWidthData calls f for every entry for which it is defined. +// +// f may be called multiple times for the same rune. The last call to f is the +// correct value. f is not called for all runes. The default tag type is +// Neutral. +func getWidthData(f func(r rune, tag elem, alt rune)) { + // Set the default values for Unified Ideographs. In line with Annex 11, + // we encode full ranges instead of the defined runes in Unified_Ideograph. + for _, b := range []struct{ lo, hi rune }{ + {0x4E00, 0x9FFF}, // the CJK Unified Ideographs block, + {0x3400, 0x4DBF}, // the CJK Unified Ideographs Extension A block, + {0xF900, 0xFAFF}, // the CJK Compatibility Ideographs block, + {0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane, + {0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane, + } { + for r := b.lo; r <= b.hi; r++ { + f(r, tagWide, 0) + } + } + + inverse := map[rune]rune{} + maps := map[string]bool{ + "<wide>": true, + "<narrow>": true, + } + + // We cannot reuse package norm's decomposition, as we need an unexpanded + // decomposition. We make use of the opportunity to verify that the + // decomposition type is as expected. + ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2) + if !maps[s[0]] { + return + } + x, err := strconv.ParseUint(s[1], 16, 32) + if err != nil { + log.Fatalf("Error parsing rune %q", s[1]) + } + if inverse[r] != 0 || inverse[rune(x)] != 0 { + log.Fatalf("Circular dependency in mapping between %U and %U", r, x) + } + inverse[r] = rune(x) + inverse[rune(x)] = r + }) + + // <rune range>;<type> + ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) { + tag, ok := typeMap[p.String(1)] + if !ok { + log.Fatalf("Unknown width type %q", p.String(1)) + } + r := p.Rune(0) + alt, ok := inverse[r] + if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign { + tag |= tagNeedsFold + if !ok { + log.Fatalf("Narrow or wide rune %U has no decomposition", r) + } + } + f(r, tag, alt) + }) +} diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go new file mode 100644 index 00000000..c17334aa --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_trieval.go @@ -0,0 +1,34 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9.
+ tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go new file mode 100644 index 00000000..dd3febd4 --- /dev/null +++ b/vendor/golang.org/x/text/width/kind_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=Kind"; DO NOT EDIT. + +package width + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Neutral-0] + _ = x[EastAsianAmbiguous-1] + _ = x[EastAsianWide-2] + _ = x[EastAsianNarrow-3] + _ = x[EastAsianFullwidth-4] + _ = x[EastAsianHalfwidth-5] +} + +const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" + +var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} + +func (i Kind) String() string { + if i < 0 || i >= Kind(len(_Kind_index)-1) { + return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go new file mode 100644 index 00000000..decb8e48 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -0,0 +1,1318 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.10,!go1.13 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. 
+var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, + 0xdb1: 
0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 
0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 
0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 
0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 
0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 
0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 
0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 
0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 
0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 
0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 
0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 
0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. 
+// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + 
{0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go new file mode 100644 index 00000000..d6def0e7 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -0,0 +1,1330 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. 
+var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 
0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 
0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 
0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 
0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 
0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 
0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 
0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x187a: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 
0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 
0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 
0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 
0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <length> <modified UTF-8-encoded rune> <0 padding not included in length> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// Ａ -> A (U+FF21 -> U+0041) +// Ｂ -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 
0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go new file mode 100644 index 00000000..7069e263 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -0,0 +1,1286 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build !go1.10 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 99 blocks, 6336 entries, 12672 bytes +// The third block is the zero block. +var widthValues = [6336]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 
0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 
0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 
0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 
0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 
0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 
0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 
0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 
0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 
0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 
0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 
0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12c4: 0x4000, + // Block 0x4c, offset 0x1300 + 0x130f: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, + 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, + 0x1350: 0x2000, 0x1351: 0x2000, + 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, + 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, + 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, + 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, + 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, + 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, + 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, + 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, + // Block 0x4e, offset 0x1380 + 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, + 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, + 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, + 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, + 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, + 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 
0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, + 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, + 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, + 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, + 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, + 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, + 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, + 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, + 0x1410: 0x4000, 0x1411: 0x4000, + 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, + 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, + 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, + 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, + 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, + 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, + 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, + // Block 0x51, offset 0x1440 + 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, + 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, + 0x1450: 0x4000, 0x1451: 0x4000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, + 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, + 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, + 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, + 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, + 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 
0x14dc: 0x4000, 0x14dd: 0x4000, + 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, + 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, + 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, + 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, + 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, + 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, + 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, + 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, + 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, + 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1574: 0x4000, + 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, + 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, + 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, + 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 
0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, + 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, + 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, + 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, + // Block 0x59, offset 0x1640 + 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, + 0x167a: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1695: 0x4000, 0x1696: 0x4000, + 0x16a4: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, + 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, + 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, + 0x1752: 0x4000, + 0x176b: 0x4000, 0x176c: 0x4000, + 0x1774: 0x4000, 0x1775: 0x4000, + 0x1776: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1790: 0x4000, 0x1791: 0x4000, + 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, + 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, + 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, + 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, + 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, + 0x17b6: 
0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, + 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, + 0x17de: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, + 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, + 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, + 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, + 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, + 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, + 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, + 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, + 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, + 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, + 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, + 0x18bc: 0x2000, 0x18bd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x48, + // Block 0x10, offset 0x400 + 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, + 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 
0x40f: 0x53, + 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, + 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, + 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go new file mode 100644 index 00000000..0049f700 --- /dev/null +++ b/vendor/golang.org/x/text/width/transform.go @@ -0,0 +1,239 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package width + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +type foldTransform struct { + transform.NopResetter +} + +func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. + for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if elem(v)&tagNeedsFold != 0 { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if elem(v)&tagNeedsFold == 0 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type narrowTransform struct { + transform.NopResetter +} + +func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. 
+ for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type wideTransform struct { + transform.NopResetter +} + +func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. 
+ v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go new file mode 100644 index 00000000..ca8e45fd --- /dev/null +++ b/vendor/golang.org/x/text/width/trieval.go @@ -0,0 +1,30 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package width + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go new file mode 100644 index 00000000..29c7509b --- /dev/null +++ b/vendor/golang.org/x/text/width/width.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type=Kind +//go:generate go run gen.go gen_common.go gen_trieval.go + +// Package width provides functionality for handling different widths in text. +// +// Wide characters behave like ideographs; they tend to allow line breaks after +// each character and remain upright in vertical text layout. Narrow characters +// are kept together in words or runs that are rotated sideways in vertical text +// layout. +// +// For more information, see https://unicode.org/reports/tr11/. +package width // import "golang.org/x/text/width" + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// TODO +// 1) Reduce table size by compressing blocks. +// 2) API proposition for computing display length +// (approximation, fixed pitch only). +// 3) Implement display length. + +// Kind indicates the type of width property as defined in https://unicode.org/reports/tr11/. +type Kind int + +const ( + // Neutral characters do not occur in legacy East Asian character sets. + Neutral Kind = iota + + // EastAsianAmbiguous characters that can be sometimes wide and sometimes + // narrow and require additional information not contained in the character + // code to further resolve their width. + EastAsianAmbiguous + + // EastAsianWide characters are wide in its usual form. They occur only in + // the context of East Asian typography. These runes may have explicit + // halfwidth counterparts. + EastAsianWide + + // EastAsianNarrow characters are narrow in its usual form. 
They often have + // fullwidth counterparts. + EastAsianNarrow + + // Note: there exist Narrow runes that do not have fullwidth or wide + // counterparts, despite what the definition says (e.g. U+27E6). + + // EastAsianFullwidth characters have a compatibility decompositions of type + // wide that map to a narrow counterpart. + EastAsianFullwidth + + // EastAsianHalfwidth characters have a compatibility decomposition of type + // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON + // SIGN. + EastAsianHalfwidth + + // Note: there exist runes that have a halfwidth counterparts but that are + // classified as Ambiguous, rather than wide (e.g. U+2190). +) + +// TODO: the generated tries need to return size 1 for invalid runes for the +// width to be computed correctly (each byte should render width 1) + +var trie = newWidthTrie(0) + +// Lookup reports the Properties of the first rune in b and the number of bytes +// of its UTF-8 encoding. +func Lookup(b []byte) (p Properties, size int) { + v, sz := trie.lookup(b) + return Properties{elem(v), b[sz-1]}, sz +} + +// LookupString reports the Properties of the first rune in s and the number of +// bytes of its UTF-8 encoding. +func LookupString(s string) (p Properties, size int) { + v, sz := trie.lookupString(s) + return Properties{elem(v), s[sz-1]}, sz +} + +// LookupRune reports the Properties of rune r. +func LookupRune(r rune) Properties { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + v, _ := trie.lookup(buf[:n]) + last := byte(r) + if r >= utf8.RuneSelf { + last = 0x80 + byte(r&0x3f) + } + return Properties{elem(v), last} +} + +// Properties provides access to width properties of a rune. +type Properties struct { + elem elem + last byte +} + +func (e elem) kind() Kind { + return Kind(e >> typeShift) +} + +// Kind returns the Kind of a rune as defined in Unicode TR #11. +// See https://unicode.org/reports/tr11/ for more details. +func (p Properties) Kind() Kind { + return p.elem.kind() +} + +// Folded returns the folded variant of a rune or 0 if the rune is canonical. +func (p Properties) Folded() rune { + if p.elem&tagNeedsFold != 0 { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Narrow returns the narrow variant of a rune or 0 if the rune is already +// narrow or doesn't have a narrow variant. +func (p Properties) Narrow() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Wide returns the wide variant of a rune or 0 if the rune is already +// wide or doesn't have a wide variant. +func (p Properties) Wide() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// TODO for Properties: +// - Add Fullwidth/Halfwidth or Inverted methods for computing variants +// mapping. +// - Add width information (including information on non-spacing runes). + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +// Reset implements the transform.Transformer interface. 
+func (t Transformer) Reset() { t.t.Reset() } + +// Transform implements the transform.Transformer interface. +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { + return t.t.Span(src, atEOF) +} + +// Bytes returns a new byte slice with the result of applying t to b. +func (t Transformer) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(t, b) + return b +} + +// String returns a string with the result of applying t to s. +func (t Transformer) String(s string) string { + s, _, _ = transform.String(t, s) + return s +} + +var ( + // Fold is a transform that maps all runes to their canonical width. + // + // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm + // provide a more generic folding mechanism. + Fold Transformer = Transformer{foldTransform{}} + + // Widen is a transform that maps runes to their wide variant, if + // available. + Widen Transformer = Transformer{wideTransform{}} + + // Narrow is a transform that maps runes to their narrow variant, if + // available. + Narrow Transformer = Transformer{narrowTransform{}} +) + +// TODO: Consider the following options: +// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some +// generalized variant of this. +// - Consider a wide Won character to be the default width (or some generalized +// variant of this). +// - Filter the set of characters that gets converted (the preferred approach is +// to allow applying filters to transforms). diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000..6b7052b8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. 
PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) 
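An illustrative sketch of calling PathEnclosingInterval as documented above (the source snippet, the file name p.go, and the expected node types are example assumptions):

    package main

    import (
        "fmt"
        "go/parser"
        "go/token"
        "strings"

        "golang.org/x/tools/go/ast/astutil"
    )

    func main() {
        const src = "package p\n\nfunc f() { _ = 1 + 2 }\n"
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        // Locate the '+' token and ask for the path of enclosing nodes.
        pos := fset.File(f.Pos()).Pos(strings.Index(src, "+"))
        path, exact := astutil.PathEnclosingInterval(f, pos, pos+1)
        for _, n := range path {
            fmt.Printf("%T\n", n) // innermost (*ast.BinaryExpr) first, *ast.File last
        }
        fmt.Println("exact:", exact) // true: the interval covers only the '+' token
    }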
+ if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. + switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case 
*ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
+ + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. 
+// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case 
*ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000..3e4b1953 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,481 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. 
+ // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. 
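To show how the import-editing helpers above are typically driven, a sketch only: the source text and file name p.go are made-up inputs, and go/format is used just to print the rewritten file:

    package main

    import (
        "go/format"
        "go/parser"
        "go/token"
        "os"

        "golang.org/x/tools/go/ast/astutil"
    )

    func main() {
        const src = "package p\n\nimport \"os\"\n\nfunc f() {}\n"
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
        if err != nil {
            panic(err)
        }
        // Add a named import, then drop the now-unused "os" import.
        astutil.AddNamedImport(fset, f, "pathpkg", "path")
        astutil.DeleteImport(fset, f, "os")
        format.Node(os.Stdout, fset, f) // the printed import block now holds pathpkg "path"
    }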
+ for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. + for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. 
+ imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000..cf72ea99 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,477 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +// +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. 
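A sketch of the paragraph-grouping behaviour of the Imports helper from imports.go above (the source text and the example.com/x path are invented for illustration):

    package main

    import (
        "fmt"
        "go/parser"
        "go/token"

        "golang.org/x/tools/go/ast/astutil"
    )

    func main() {
        const src = "package p\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"example.com/x\"\n)\n"
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        // Each blank-line-separated paragraph of the import block is one group:
        // group 0 holds "fmt" and "os", group 1 holds "example.com/x".
        for i, group := range astutil.Imports(fset, f) {
            for _, spec := range group {
                fmt.Println(i, spec.Path.Value)
            }
        }
    }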
+func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
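A minimal sketch of driving Apply with the Cursor operations described above (the input source and the literal rewrite are invented for illustration):

    package main

    import (
        "go/ast"
        "go/format"
        "go/parser"
        "go/token"
        "os"

        "golang.org/x/tools/go/ast/astutil"
    )

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", "package p\n\nvar answer = 41\n", 0)
        if err != nil {
            panic(err)
        }
        // Post-order callback: replace every integer literal 41 with 42.
        astutil.Apply(f, nil, func(c *astutil.Cursor) bool {
            if lit, ok := c.Node().(*ast.BasicLit); ok && lit.Kind == token.INT && lit.Value == "41" {
                c.Replace(&ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.INT, Value: "42"})
            }
            return true
        })
        format.Node(os.Stdout, fset, f) // prints the file with: var answer = 42
    }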
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + 
a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000..76306298 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,14 @@ +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 00000000..98b3987b --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,109 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. 
+func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + b, err := gcimporter.BExportData(fset, pkg) + if err != nil { + return err + } + _, err = out.Write(b) + return err +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 00000000..efe221e7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. 
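An illustrative sketch of wiring up the export-data importer described above; whether it can actually resolve "fmt" depends on the local toolchain and workspace having compiled export data installed, so the result is an assumption rather than a guarantee:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
        "log"

        "golang.org/x/tools/go/gcexportdata"
    )

    func main() {
        fset := token.NewFileSet()
        imports := make(map[string]*types.Package)
        imp := gcexportdata.NewImporter(fset, imports)

        // Resolve a package from gc export data alone, without type-checking its source.
        pkg, err := imp.Import("fmt")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(pkg.Path(), "has", len(pkg.Scope().Names()), "top-level objects")
    }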
+// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go new file mode 100644 index 00000000..2713dce6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/main.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// The gcexportdata command is a diagnostic tool that displays the +// contents of gc export data files. +package main + +import ( + "flag" + "fmt" + "go/token" + "go/types" + "log" + "os" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/types/typeutil" +) + +var packageFlag = flag.String("package", "", "alternative package to print") + +func main() { + log.SetPrefix("gcexportdata: ") + log.SetFlags(0) + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") + } + flag.Parse() + if flag.NArg() != 1 { + flag.Usage() + os.Exit(2) + } + filename := flag.Args()[0] + + f, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + + r, err := gcexportdata.NewReader(f) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Decode the package. + const primary = "" + imports := make(map[string]*types.Package) + fset := token.NewFileSet() + pkg, err := gcexportdata.Read(r, fset, imports, primary) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Optionally select an indirectly mentioned package. + if *packageFlag != "" { + pkg = imports[*packageFlag] + if pkg == nil { + fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", + filename, *packageFlag) + for p := range imports { + if p != primary { + fmt.Fprintf(os.Stderr, "\t%s\n", p) + } + } + os.Exit(1) + } + } + + // Print all package-level declarations, including non-exported ones. + fmt.Printf("package %s\n", pkg.Name()) + for _, imp := range pkg.Imports() { + fmt.Printf("import %q\n", imp.Path()) + } + qual := func(p *types.Package) string { + if pkg == p { + return "" + } + return p.Name() + } + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + fmt.Printf("%s: %s\n", + fset.Position(obj.Pos()), + types.ObjectString(obj, qual)) + + // For types, print each method. 
+ if _, ok := obj.(*types.TypeName); ok { + for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { + fmt.Printf("%s: %s\n", + fset.Position(method.Obj().Pos()), + types.SelectionString(method, qual)) + } + } + } +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 00000000..a807d0aa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). 
+// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. +// If no file set is provided, position info will be missing. +func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. + p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + 
p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". + n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. 
+ + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). + var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. 
+ p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // 
float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. 
+// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". ") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 00000000..3288a0bf --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1037 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
+ sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. +func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line int) token.Pos { + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. + // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. 
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? + params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. 
+ var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 00000000..f33dc561 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. 
The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 00000000..9cf18660 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +// +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + id = path // make sure we have an id to print in error message + return + } + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// ImportData imports a package by reading the gc-generated export data, +// adds the corresponding package object to the packages map indexed by id, +// and returns the object. +// +// The packages map must contains all packages already imported. The data +// reader position must be the beginning of the export data section. The +// filename is only used in error messages. +// +// If packages[id] contains the completely imported package, that package +// can be used directly, and there is no need to call this function (but +// there is also no harm but for extra time used). +// +func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { + // support for parser error handling + defer func() { + switch r := recover().(type) { + case nil: + // nothing to do + case importError: + err = r + default: + panic(r) // internal error + } + }() + + var p parser + p.init(filename, id, data, packages) + pkg = p.parseExport() + + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. +// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. 
+type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . 
+// +func (p *parser) parsePackageId() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . +// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageId() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". +// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
+// +func (p *parser) parseArrayType(parent *types.Package) types.Type { + // "[" already consumed and lookahead known not to be "]" + lit := p.expect(scanner.Int) + p.expect(']') + elem := p.parseType(parent) + n, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + p.error(err) + } + return types.NewArray(elem, n) +} + +// MapType = "map" "[" Type "]" Type . +// +func (p *parser) parseMapType(parent *types.Package) types.Type { + p.expectKeyword("map") + p.expect('[') + key := p.parseType(parent) + p.expect(']') + elem := p.parseType(parent) + return types.NewMap(key, elem) +} + +// Name = identifier | "?" | QualifiedName . +// +// For unqualified and anonymous names, the returned package is the parent +// package unless parent == nil, in which case the returned package is the +// package being imported. (The parent package is not nil if the the name +// is an unqualified struct field or interface method name belonging to a +// type declared in another package.) +// +// For qualified names, the returned package is nil (and not created if +// it doesn't exist yet) unless materializePkg is set (which creates an +// unnamed package with valid package path). In the latter case, a +// subsequent import clause is expected to provide a name for the package. +// +func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { + pkg = parent + if pkg == nil { + pkg = p.sharedPkgs[p.id] + } + switch p.tok { + case scanner.Ident: + name = p.lit + p.next() + case '?': + // anonymous + p.next() + case '@': + // exported name prefixed with package path + pkg = nil + var id string + id, name = p.parseQualifiedName() + if materializePkg { + pkg = p.getPkg(id, "") + } + default: + p.error("name expected") + } + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +// Field = Name Type [ string_lit ] . +// +func (p *parser) parseField(parent *types.Package) (*types.Var, string) { + pkg, name := p.parseName(parent, true) + + if name == "_" { + // Blank fields should be package-qualified because they + // are unexported identifiers, but gc does not qualify them. + // Assuming that the ident belongs to the current package + // causes types to change during re-exporting, leading + // to spurious "can't assign A to B" errors from go/types. + // As a workaround, pretend all blank fields belong + // to the same unique dummy package. + const blankpkg = "<_>" + pkg = p.getPkg(blankpkg, blankpkg) + } + + typ := p.parseType(parent) + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + p.errorf("anonymous field expected") + } + anonymous = true + } + tag := "" + if p.tok == scanner.String { + s := p.expect(scanner.String) + var err error + tag, err = strconv.Unquote(s) + if err != nil { + p.errorf("invalid struct tag %s: %s", s, err) + } + } + return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag +} + +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . 
+// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. +// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return types.NewInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
+// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . +// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageId(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . +// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. + typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . 
+// +func (p *parser) parseFuncDecl() { + // "func" already consumed + pkg, name := p.parseExportedName() + typ := p.parseFunc(nil) + pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) +} + +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// +func (p *parser) parseDecl() { + if p.tok == scanner.Ident { + switch p.lit { + case "import": + p.parseImportDecl() + case "const": + p.parseConstDecl() + case "type": + p.parseTypeDecl() + case "var": + p.parseVarDecl() + case "func": + p.next() // look ahead + if p.tok == '(' { + p.parseMethodDecl() + } else { + p.parseFuncDecl() + } + } + } + p.expect('\n') +} + +// ---------------------------------------------------------------------------- +// Export + +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . +// +func (p *parser) parseExport() *types.Package { + p.expectKeyword("package") + name := p.parsePackageName() + if p.tok == scanner.Ident && p.lit == "safe" { + // package was compiled with -u option - ignore + p.next() + } + p.expect('\n') + + pkg := p.getPkg(p.id, name) + + for p.tok != '$' && p.tok != scanner.EOF { + p.parseDecl() + } + + if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { + // don't call next()/expect() since reading past the + // export data may cause scanner errors (e.g. NUL chars) + p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) + } + + if n := p.scanner.ErrorCount; n != 0 { + p.errorf("expected no scanner errors, got %d", n) + } + + // Record all locally referenced packages as imports. + var imports []*types.Package + for id, pkg2 := range p.localPkgs { + if pkg2.Name() == "" { + p.errorf("%s package has no name", id) + } + if id == p.id { + continue // avoid self-edge + } + imports = append(imports, pkg2) + } + sort.Sort(byPath(imports)) + pkg.SetImports(imports) + + // package was imported completely and without errors + pkg.MarkComplete() + + return pkg +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 00000000..be671c79 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,723 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +// +build go1.11 + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// IExportData returns the binary export data for pkg. +// If no file set is provided, position info will be missing. +func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. 
+ panic(e) + } + }() + + p := iexporter{ + out: bytes.NewBuffer(nil), + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex, pkg) + w.flush() + + // Assemble header. + var hdr intWriter + hdr.WriteByte('i') + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(p.out, &hdr) + io.Copy(p.out, &p.strings) + io.Copy(p.out, &p.data0) + + return p.out.Bytes(), nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) { + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + pkgObjs[localpkg] = nil + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return pkgs[i].Path() < pkgs[j].Path() + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(pkg.Path()) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. 
+ assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. + w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(pkg.Path()) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. 
+ w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + w.bool(f.Embedded()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch v.Kind() { + case constant.Bool: + w.bool(constant.BoolVal(v)) + case constant.Int: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case constant.Float: + f := constantToFloat(v) + w.mpfloat(f, typ) + case constant.Complex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), 
typ) + case constant.String: + w.string(constant.StringVal(v)) + case constant.Unknown: + // package contains type errors + default: + panic(internalErrorf("unexpected value %v (%T)", v, v)) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + assert(x.Kind() == constant.Float) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. 
The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 00000000..3cb7ae5b --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,606 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
+ +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + const currentVersion = 0 + version := -1 + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + version = int(r.uint64()) + switch version { + case currentVersion: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + var localpkg *types.Package + for _, pkg := range pkgList { + if pkg.Path() == path { + localpkg = pkg + } + } + + names := make([]string, 0, len(p.pkgIndex[localpkg])) + for name := range p.pkgIndex[localpkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(localpkg, name) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := 
append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + localpkg.SetImports(list) + + // package was imported completely and without errors + localpkg.MarkComplete() + + consumed, _ := r.Seek(0, io.SeekCurrent) + return int(consumed), localpkg, nil +} + +type iimporter struct { + ipath string + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := 
r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } + + if r.prevFile == "" && r.prevLine == 0 { + return token.NoPos + } + + return r.p.fake.pos(r.prevFile, int(r.prevLine)) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) 
+} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 00000000..463f2522 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 00000000..ab28b95c --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 00000000..ea15d57b --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,173 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "log" + "os" + "os/exec" + "strings" + "time" +) + +var debug = false + +// GetSizes returns the sizes used by the underlying driver with the given parameters. +func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver. 
+ const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + // We did not find the driver, so use "go list". + tool = "off" + } + } + + if tool == "off" { + return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData) + } + + req, err := json.Marshal(struct { + Command string `json:"command"` + Env []string `json:"env"` + BuildFlags []string `json:"build_flags"` + }{ + Command: "sizes", + Env: env, + BuildFlags: buildFlags, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, tool) + cmd.Dir = dir + cmd.Env = env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = new(bytes.Buffer) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + var response struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + } + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return response.Sizes, nil +} + +func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} + args = append(args, buildFlags...) + args = append(args, "--", "unsafe") + stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...) + var goarch, compiler string + if err != nil { + if strings.Contains(err.Error(), "cannot find main module") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + envout, enverr := InvokeGo(ctx, env, dir, usesExportData, "env", "GOARCH") + if enverr != nil { + return nil, err + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else { + return nil, err + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not determine GOARCH and Go compiler") + } + goarch = fields[0] + compiler = fields[1] + } + return types.SizesFor(compiler, goarch), nil +} + +// InvokeGo returns the stdout of a go command invocation. +func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) { + if debug { + defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) + } + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. 
+ cmd.Env = append(append([]string{}, env...), "PWD="+dir) + cmd.Dir = dir + cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - executable not found + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + if !usesExportData { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). + // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) + } + + return stdout, nil +} + +func cmdDebugStr(envlist []string, args ...string) string { + env := make(map[string]string) + for _, kv := range envlist { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 00000000..3799f8ed --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,222 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. 
+ +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages, with each mode returning all the data of the +previous mode with some extra added. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. 
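+
+For a sense of how a client typically consumes the loaded packages and their
+import graph (an illustrative sketch, not part of the original notes; it assumes
+pkgs came from a Load call whose mode populates Imports all the way down):
+
+	seen := make(map[*Package]bool)
+	var visit func(p *Package)
+	visit = func(p *Package) {
+		if seen[p] {
+			return
+		}
+		seen[p] = true
+		for _, imp := range p.Imports {
+			visit(imp)
+		}
+	}
+	for _, p := range pkgs {
+		visit(p)
+	}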
+ +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. 
+Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). +Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. +Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 00000000..b696b687 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "strings" +) + +// The Driver Protocol +// +// The driver, given the inputs to a call to Load, returns metadata about the packages specified. +// This allows for different build systems to support go/packages by telling go/packages how the +// packages' source is organized. +// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in +// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package +// documentation in doc.go for the full description of the patterns that need to be supported. +// A driver receives as a JSON-serialized driverRequest struct in standard input and will +// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. + +// driverRequest is used to provide the portion of Load's Config that is needed by a driver. +type driverRequest struct { + Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. + BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents + // of overlay files. + Overlay map[string][]byte `json:"overlay"` +} + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. +func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*driverResponse, error) { + req, err := json.Marshal(driverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = new(bytes.Buffer) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 00000000..6b341b7e --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1021 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/semver" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +// init fills in r with a driverResponse. +func (r *responseDeduper) init(dr *driverResponse) { + r.dr = dr + r.seenRoots = map[string]bool{} + r.seenPackages = map[string]*Package{} + for _, pkg := range dr.Packages { + r.seenPackages[pkg.ID] = pkg + } + for _, root := range dr.Roots { + r.seenRoots[root] = true + } +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +// goInfo contains global information from the go tool. +type goInfo struct { + rootDirs map[string]string + env goEnv +} + +type goEnv struct { + modulesOn bool +} + +func determineEnv(cfg *Config) goEnv { + buf, err := invokeGo(cfg, "env", "GOMOD") + if err != nil { + return goEnv{} + } + gomod := bytes.TrimSpace(buf.Bytes()) + + env := goEnv{} + env.modulesOn = len(gomod) > 0 + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + var sizes types.Sizes + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + sizes, sizeserr = getSizes(cfg) + sizeswg.Done() + }() + } + + // start fetching rootDirs + var info goInfo + var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) + go func() { + info.rootDirs = determineRootDirs(cfg) + close(rootDirsReady) + }() + go func() { + info.env = determineEnv(cfg) + close(envReady) + }() + getGoInfo := func() *goInfo { + <-rootDirsReady + <-envReady + return &info + } + + // always pass getGoInfo to golistDriver + golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { + return golistDriver(cfg, getGoInfo, patterns...) + } + + // Determine files requested in contains patterns + var containFiles []string + var packagesNamed []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. 
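+	// For illustration (hypothetical inputs, not from the original patch):
+	// patterns = []string{"file=/tmp/app/main.go", "fmt", "pattern=./..."} yields
+	// containFiles = ["/tmp/app/main.go"] and restPatterns = ["fmt", "./..."].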
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "iamashamedtousethedisabledqueryname": + packagesNamed = append(packagesNamed, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + response := &responseDeduper{} + var err error + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := golistDriver(cfg, restPatterns...) + if err != nil { + return nil, err + } + response.init(dr) + } else { + response.init(&driverResponse{}) + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + // types.SizesFor always returns nil or a *types.StdSizes + response.dr.Sizes, _ = sizes.(*types.StdSizes) + + var containsCandidates []string + + if len(containFiles) != 0 { + if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { + return nil, err + } + } + + if len(packagesNamed) != 0 { + if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + return nil, err + } + } + + modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + if err != nil { + return nil, err + } + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{ + { + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }, + }, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + + return response.dr, nil +} + +func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { + if len(pkgs) == 0 { + return nil + } + drivercfg := *cfg + if getGoInfo().env.modulesOn { + drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly") + } + dr, err := driver(&drivercfg, pkgs...) 
+ + if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + if err != nil { + return err + } + if err := addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo); err != nil { + return err + } + return nil +} + +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := driver(cfg, pattern) + if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) { + // There was an error loading the package. Try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're in modules mode + // and the ad-hoc is located outside a module. + var queryErr error + dirResponse, queryErr = driver(cfg, query) + if queryErr != nil { + // Return the original error if the attempt to fall back failed. + return err + } + // Special case to handle issue #33482: + // If this is a file= query for ad-hoc packages where the file only exists on an overlay, + // and exists outside of a module, add the file in for the package. + if len(dirResponse.Packages) == 1 && len(dirResponse.Packages) == 1 && + dirResponse.Packages[0].ID == "command-line-arguments" && len(dirResponse.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range cfg.Overlay { + if path == filename { + dirResponse.Packages[0].Errors = nil + dirResponse.Packages[0].GoFiles = []string{path} + dirResponse.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { + // calling `go env` isn't free; bail out if there's nothing to do. + if len(queries) == 0 { + return nil + } + // Determine which directories are relevant to scan. + roots, modRoot, err := roots(cfg) + if err != nil { + return err + } + + // Scan the selected directories. Simple matches, from GOPATH/GOROOT + // or the local module, can simply be "go list"ed. 
Matches from the + // module cache need special treatment. + var matchesMu sync.Mutex + var simpleMatches, modCacheMatches []string + add := func(root gopathwalk.Root, dir string) { + // Walk calls this concurrently; protect the result slices. + matchesMu.Lock() + defer matchesMu.Unlock() + + path := dir + if dir != root.Path { + path = dir[len(root.Path)+1:] + } + if pathMatchesQueries(path, queries) { + switch root.Type { + case gopathwalk.RootModuleCache: + modCacheMatches = append(modCacheMatches, path) + case gopathwalk.RootCurrentModule: + // We'd need to read go.mod to find the full + // import path. Relative's easier. + rel, err := filepath.Rel(cfg.Dir, dir) + if err != nil { + // This ought to be impossible, since + // we found dir in the current module. + panic(err) + } + simpleMatches = append(simpleMatches, "./"+rel) + case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: + simpleMatches = append(simpleMatches, path) + } + } + } + + startWalk := time.Now() + gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) + cfg.Logf("%v for walk", time.Since(startWalk)) + + // Weird special case: the top-level package in a module will be in + // whatever directory the user checked the repository out into. It's + // more reasonable for that to not match the package name. So, if there + // are any Go files in the mod root, query it just to be safe. + if modRoot != "" { + rel, err := filepath.Rel(cfg.Dir, modRoot) + if err != nil { + panic(err) // See above. + } + + files, err := ioutil.ReadDir(modRoot) + for _, f := range files { + if strings.HasSuffix(f.Name(), ".go") { + simpleMatches = append(simpleMatches, rel) + break + } + } + } + + addResponse := func(r *driverResponse) { + for _, pkg := range r.Packages { + response.addPackage(pkg) + for _, name := range queries { + if pkg.Name == name { + response.addRoot(pkg.ID) + break + } + } + } + } + + if len(simpleMatches) != 0 { + resp, err := driver(cfg, simpleMatches...) + if err != nil { + return err + } + addResponse(resp) + } + + // Module cache matches are tricky. We want to avoid downloading new + // versions of things, so we need to use the ones present in the cache. + // go list doesn't accept version specifiers, so we have to write out a + // temporary module, and do the list in that module. + if len(modCacheMatches) != 0 { + // Collect all the matches, deduplicating by major version + // and preferring the newest. + type modInfo struct { + mod string + major string + } + mods := make(map[modInfo]string) + var imports []string + for _, modPath := range modCacheMatches { + matches := modCacheRegexp.FindStringSubmatch(modPath) + mod, ver := filepath.ToSlash(matches[1]), matches[2] + importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) + + major := semver.Major(ver) + if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { + mods[modInfo{mod, major}] = ver + } + + imports = append(imports, importPath) + } + + // Build the temporary module. + var gomod bytes.Buffer + gomod.WriteString("module modquery\nrequire (\n") + for mod, version := range mods { + gomod.WriteString("\t" + mod.mod + " " + version + "\n") + } + gomod.WriteString(")\n") + + tmpCfg := *cfg + + // We're only trying to look at stuff in the module cache, so + // disable the network. This should speed things up, and has + // prevented errors in at least one case, #28518. 
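+		// Illustrative example (hypothetical module versions): if the cache matched
+		// golang.org/x/tools@v0.1.0 and rsc.io/quote@v1.5.2, the temporary go.mod
+		// written below would contain:
+		//
+		//	module modquery
+		//	require (
+		//		golang.org/x/tools v0.1.0
+		//		rsc.io/quote v1.5.2
+		//	)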
+ tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...)) + + var err error + tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") + if err != nil { + return err + } + defer os.RemoveAll(tmpCfg.Dir) + + if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { + return fmt.Errorf("writing go.mod for module cache query: %v", err) + } + + // Run the query, using the import paths calculated from the matches above. + resp, err := driver(&tmpCfg, imports...) + if err != nil { + return fmt.Errorf("querying module cache matches: %v", err) + } + addResponse(resp) + } + + return nil +} + +func getSizes(cfg *Config) (types.Sizes, error) { + return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) +} + +// roots selects the appropriate paths to walk based on the passed-in configuration, +// particularly the environment and the presence of a go.mod in cfg.Dir's parents. +func roots(cfg *Config) ([]gopathwalk.Root, string, error) { + stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") + if err != nil { + return nil, "", err + } + + fields := strings.Split(stdout.String(), "\n") + if len(fields) != 4 || len(fields[3]) != 0 { + return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) + } + goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] + var modDir string + if gomod != "" { + modDir = filepath.Dir(gomod) + } + + var roots []gopathwalk.Root + // Always add GOROOT. + roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT}) + // If modules are enabled, scan the module dir. + if modDir != "" { + roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule}) + } + // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. + for _, p := range gopath { + if modDir != "" { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) + } else { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH}) + } + } + + return roots, modDir, nil +} + +// These functions were copied from goimports. See further documentation there. + +// pathMatchesQueries is adapted from pkgIsCandidate. +// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? +func pathMatchesQueries(path string, queries []string) bool { + lastTwo := lastTwoComponents(path) + for _, query := range queries { + if strings.Contains(lastTwo, query) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, query) { + return true + } + } + } + return false +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. 
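+// For example (illustrative): lastTwoComponents("a/b/c") returns "/b/c", while a
+// value with fewer than two separators, such as "b/c", is returned unchanged.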
+func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *jsonPackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// golistDriver uses the "go list" command to expand the pattern +// words and return metadata for the specified packages. dir may be +// "" and env may be nil, as per os/exec.Command. +func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := invokeGo(cfg, golistargs(cfg, words)...) + if err != nil { + return nil, err + } + seen := make(map[string]*jsonPackage) + // Decode the JSON and convert it to Package form. + var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. 
But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok := getPkgPath(p.ImportPath, rootsDirs) + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + // skip the duplicate + continue + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. + if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true + } + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + if p.Error != nil { + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363. + }) + } + + response.Packages = append(response.Packages, pkg) + } + + return &response, nil +} + +// getPkgPath finds the package path of a directory if it's relative to a root directory. +func getPkgPath(dir string, goInfo func() *goInfo) (string, bool) { + for rdir, rpath := range goInfo().rootDirs { + // TODO(matloob): This doesn't properly handle symlinks. 
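+		// Illustrative values (assumptions, not from the original patch): with
+		// rdir = "/home/me/proj", rpath = "example.com/proj" and
+		// dir = "/home/me/proj/internal/util", filepath.Rel yields "internal/util"
+		// and the function returns "example.com/proj/internal/util".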
+ r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only ore root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true + } + } + return "", false +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "list", "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// invokeGo returns the stdout of a go command invocation. +func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) + cmd.Dir = cfg.Dir + cmd.Stdout = stdout + cmd.Stderr = stderr + defer func(start time.Time) { + cfg.Logf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr) + }(time.Now()) + + if err := cmd.Run(); err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, fmt.Errorf("%s", stderr.String()) + } + + // Is there an error running the C compiler in cgo? 
This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + strings.IndexRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) == -1 + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. + const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + // Note that the error message we look for in this case is different that the one looked for above. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a + // directory outside any module. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. 
+ "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). + // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout) + } + + return stdout, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd, args ...string) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + var quotedArgs []string + for _, arg := range args { + quotedArgs = append(quotedArgs, strconv.Quote(arg)) + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 00000000..308e79c5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,310 @@ +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "go/parser" + "go/token" + "path" + "path/filepath" + "strconv" + "strings" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. 
+// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + for opath, contents := range cfg.Overlay { + base := filepath.Base(opath) + dir := filepath.Dir(opath) + var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant + var testVariantOf *Package // if opath is a test file, this is the package it is testing + var fileExists bool + isTestFile := strings.HasSuffix(opath, "_test.go") + pkgName, ok := extractPackageName(opath, contents) + if !ok { + // Don't bother adding a file that doesn't even have a parsable package statement + // to the overlay. + continue + } + nextPackage: + for _, p := range response.dr.Packages { + if pkgName != p.Name && p.ID != "command-line-arguments" { + continue + } + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + // Make sure to capture information on the package's test variant, if needed. + if isTestFile && !hasTestFiles(p) { + // TODO(matloob): Are there packages other than the 'production' variant + // of a package that this can match? This shouldn't match the test main package + // because the file is generated in another directory. + testVariantOf = p + continue nextPackage + } + if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // If we've already seen the test variant, + // make sure to label which package it is a test variant of. + if hasTestFiles(pkg) { + testVariantOf = p + continue nextPackage + } + // If we have already seen the package of which this is a test variant. + if hasTestFiles(p) { + testVariantOf = pkg + } + } + pkg = p + if filepath.Base(f) == base { + fileExists = true + } + } + } + // The overlay could have included an entirely new package. + if pkg == nil { + // Try to find the module or gopath dir the file is contained in. + // Then for modules, add the module opath to the beginning. + var pkgPath string + for rdir, rpath := range rootDirs().rootDirs { + // TODO(matloob): This doesn't properly handle symlinks. + r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + pkgPath = filepath.ToSlash(r) + if rpath != "" { + pkgPath = path.Join(rpath, pkgPath) + } + // We only create one new package even it can belong in multiple modules or GOPATH entries. + // This is okay because tools (such as the LSP) that use overlays will recompute the overlay + // once the file is saved, and golist will do the right thing. + // TODO(matloob): Implement module tiebreaking? 
+ break + } + if pkgPath == "" { + continue + } + isXTest := strings.HasSuffix(pkgName, "_test") + if isXTest { + pkgPath += "_test" + } + id := pkgPath + if isTestFile && !isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) + } + // Try to reclaim a package with the same id if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package + if pkg == nil { + pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)} + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + _, found := pkg.Imports[imp] + if !found { + overlayAddsImports = true + // TODO(matloob): Handle cases when the following block isn't correct. + // These include imports of vendored packages, etc. + id, ok := havePkgs[imp] + if !ok { + id = imp + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as wel. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + continue + } + + // toPkgPath tries to guess the package path given the id. + // This isn't always correct -- it's certainly wrong for + // vendored packages' paths. + toPkgPath := func(id string) string { + // TODO(matloob): Handle vendor paths. + i := strings.IndexByte(id, ' ') + if i >= 0 { + return id[:i] + } + return id + } + + // Do another pass now that new packages have been created to determine the + // set of missing packages. + for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + pkgPath := toPkgPath(imp.ID) + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from directories code can be contained in to the +// corresponding import path prefixes of those directories. +// Its result is used to try to determine the import path for a package containing +// an overlay file. 
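+// For example (illustrative values only): in module mode the result might look
+// like {"/home/me/proj": "example.com/proj",
+// "/home/me/go/pkg/mod/golang.org/x/tools@v0.1.0": "golang.org/x/tools"};
+// in GOPATH mode every $GOPATH/src directory maps to the empty prefix.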
+func determineRootDirs(cfg *Config) map[string]string { + // Assume modules first: + out, err := invokeGo(cfg, "list", "-m", "-json", "all") + if err != nil { + return determineRootDirsGOPATH(cfg) + } + m := map[string]string{} + type jsonMod struct{ Path, Dir string } + for dec := json.NewDecoder(out); dec.More(); { + mod := new(jsonMod) + if err := dec.Decode(mod); err != nil { + return m // Give up and return an empty map. Package won't be found for overlay. + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + m[mod.Dir] = mod.Path + } + } + return m +} + +func determineRootDirsGOPATH(cfg *Config) map[string]string { + m := map[string]string{} + out, err := invokeGo(cfg, "env", "GOPATH") + if err != nil { + // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. + // When we try to find the import path for a directory, there will be no root-dir match and + // we'll give up. + return m + } + for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { + m[filepath.Join(p, "src")] = "" + } + return m +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return "", false + } + return f.Name.Name, true +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 00000000..b29c9136 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1113 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. 
+ +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + + "golang.org/x/tools/go/gcexportdata" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportsFile adds ExportsFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. 
+ // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. 
+// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that looks for an external driver binary, and if +// it does not find it falls back to the built in go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + return driver(cfg, patterns...) +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that were presented to the compiler. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. 
+ // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
+func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + + return ld +} + +// refine connects the supplied packages into a graph and then adds type and +// and syntax information as requested by the LoadMode. 
+func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range list { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + lpkg := &loaderPackage{ + Package: pkg, + needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0, + needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 || + len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files + pkg.ExportFile == "" && pkg.PkgPath != "unsafe", + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + // If NeedImports isn't set, the imports fields will all be zeroed out. + if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. 
+ for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, for extra de-Hyrum-ization. + if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. 
+ var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + if len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + ld.loadFromExportData(lpkg) + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. 
+ if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +// +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. 
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if viewLen != len(view) { + log.Fatalf("Unexpected package creation during export data loading") + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +// impliedLoadMode returns loadMode with its dependencies. 
+func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { + // If NeedTypesInfo, go/packages needs to do typechecking itself so it can + // associate type info with the AST. To do so, we need the export data + // for dependencies, which means we need to ask for the direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { + // With NeedDeps we need to load at least direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 00000000..b13cb081 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,55 @@ +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go new file mode 100644 index 00000000..eef25969 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -0,0 +1,62 @@ +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports // import "golang.org/x/tools/imports" + +import ( + "go/build" + + intimp "golang.org/x/tools/internal/imports" +) + +// Options specifies options for processing files. +type Options struct { + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Debug controls verbose logging. 
+var Debug = false + +// LocalPrefix is a comma-separated string of import path prefixes, which, if +// set, instructs Process to sort the import paths with the given prefixes +// into another group after 3rd-party packages. +var LocalPrefix string + +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data ``as if'' it were in filename, pass the data as a non-nil src. +func Process(filename string, src []byte, opt *Options) ([]byte, error) { + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + intopt := &intimp.Options{ + Env: &intimp.ProcessEnv{ + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + Debug: Debug, + LocalPrefix: LocalPrefix, + }, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + return intimp.Process(filename, src, intopt) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go new file mode 100644 index 00000000..7219c8e9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -0,0 +1,196 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fastwalk provides a faster version of filepath.Walk for file system +// scanning tools. +package fastwalk + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// TraverseLink is used as a return value from WalkFuncs to indicate that the +// symlink named in the call may be traversed. +var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") + +// SkipFiles is a used as a return value from WalkFuncs to indicate that the +// callback should not be called for any other files in the current directory. +// Child directories will still be traversed. +var SkipFiles = errors.New("fastwalk: skip remaining files in directory") + +// Walk is a faster implementation of filepath.Walk. +// +// filepath.Walk's design necessarily calls os.Lstat on each file, +// even if the caller needs less info. +// Many tools need only the type of each file. +// On some platforms, this information is provided directly by the readdir +// system call, avoiding the need to stat each file individually. +// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399 +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. 
It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. +func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == TraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. 
+ return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 00000000..ccffec5a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 00000000..ab7fbc0a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 00000000..a3b26a7b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 00000000..e880d358 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux +// +build !appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 00000000..a906b875 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 00000000..3369b1a0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. + buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. 
+ if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/15653 + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 00000000..60eb67b6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,268 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + Debug bool // Enable debug logging + ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. 
+func SrcDirsRoots(ctx *build.Context) []Root { + var roots []Root + roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) + for _, p := range filepath.SplitList(ctx.GOPATH) { + roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) + } + return roots +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. +func Walk(roots []Root, add func(root Root, dir string), opts Options) { + WalkSkip(roots, add, func(Root, string) bool { return false }, opts) +} + +// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// For each directory that will be scanned, skip will be called (concurrently) +// with the absolute paths of the containing source directory and the directory. +// If skip returns false on a directory it will be processed. +// add will be called concurrently. +// skip will be called concurrently. +func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { + for _, root := range roots { + walkDir(root, add, skip, opts) + } +} + +func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Debug { + log.Printf("skipping nonexistent directory: %v", root.Path) + } + return + } + if opts.Debug { + log.Printf("scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + skip: skip, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + } + + if opts.Debug { + log.Printf("scanned %s", root.Path) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. + opts Options // Options passed to Walk by the user. + + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options. +func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Debug { + log.Printf("Directory added to ignore list: %s", full) + } + } else if w.opts.Debug { + log.Printf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. 
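For orientation, a sketch of how a goimports-style scan drives this package. Note that gopathwalk is internal to x/tools, so the import below only compiles from within x/tools itself, and the callback body is illustrative:

    package main

    import (
    	"go/build"
    	"log"

    	"golang.org/x/tools/internal/gopathwalk"
    )

    func main() {
    	// Walk GOROOT/src and every GOPATH/src entry, reporting each directory
    	// that contains at least one .go file.
    	roots := gopathwalk.SrcDirsRoots(&build.Default)
    	gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
    		// The add callback may be invoked concurrently.
    		log.Printf("candidate package dir under %s: %s", root.Path, dir)
    	}, gopathwalk.Options{Debug: false, ModulesEnabled: false})
    }
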
+func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Debug { + if err != nil { + log.Print(err) + } else { + log.Printf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + if w.skip != nil { + // Check with the user specified callback. + return w.skip(w.root, dir) + } + return false +} + +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + return fastwalk.SkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.SkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi, path) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.TraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts, dir) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go new file mode 100644 index 00000000..bcfbb07e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -0,0 +1,1385 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package imports + +import ( + "bytes" + "context" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/gopathwalk" +) + +// importToGroup is a list of functions which map from an import path to +// a group number. +var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool){ + func(env *ProcessEnv, importPath string) (num int, ok bool) { + if env.LocalPrefix == "" { + return + } + for _, p := range strings.Split(env.LocalPrefix, ",") { + if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { + return 3, true + } + } + return + }, + func(_ *ProcessEnv, importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(_ *ProcessEnv, importPath string) (num int, ok bool) { + if strings.Contains(importPath, ".") { + return 1, true + } + return + }, +} + +func importGroup(env *ProcessEnv, importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(env, importPath); ok { + return n + } + } + return 0 +} + +type ImportFixType int + +const ( + AddImport ImportFixType = iota + DeleteImport + SetImportName +) + +type ImportFix struct { + // StmtInfo represents the import statement this fix will add, remove, or change. + StmtInfo ImportInfo + // IdentName is the identifier that this fix will add or remove. + IdentName string + // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). + FixType ImportFixType +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A packageInfo represents what's known about a package. +type packageInfo struct { + name string // real package name, if known. + exports map[string]bool // known exports. +} + +// parseOtherFiles parses all the Go files in srcDir except filename, including +// test files if filename looks like a test. +func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { + // This could use go/packages but it doesn't buy much, and it fails + // with https://golang.org/issue/26296 in LoadFiles mode in some cases. + considerTests := strings.HasSuffix(filename, "_test.go") + + fileBase := filepath.Base(filename) + packageFileInfos, err := ioutil.ReadDir(srcDir) + if err != nil { + return nil + } + + var files []*ast.File + for _, fi := range packageFileInfos { + if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { + continue + } + + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + if err != nil { + continue + } + + files = append(files, f) + } + + return files +} + +// addGlobals puts the names of package vars into the provided map. 
+func addGlobals(f *ast.File, globals map[string]bool) { + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range genDecl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + globals[valueSpec.Names[0].Name] = true + } + } +} + +// collectReferences builds a map of selector expressions, from +// left hand side (X) to a set of right hand sides (Sel). +func collectReferences(f *ast.File) references { + refs := references{} + + var visitor visitFn + visitor = func(node ast.Node) ast.Visitor { + if node == nil { + return visitor + } + switch v := node.(type) { + case *ast.SelectorExpr: + xident, ok := v.X.(*ast.Ident) + if !ok { + break + } + if xident.Obj != nil { + // If the parser can resolve it, it's not a package ref. + break + } + if !ast.IsExported(v.Sel.Name) { + // Whatever this is, it's not exported from a package. + break + } + pkgName := xident.Name + r := refs[pkgName] + if r == nil { + r = make(map[string]bool) + refs[pkgName] = r + } + r[v.Sel.Name] = true + } + return visitor + } + ast.Walk(visitor, f) + return refs +} + +// collectImports returns all the imports in f. +// Unnamed imports (., _) and "C" are ignored. +func collectImports(f *ast.File) []*ImportInfo { + var imports []*ImportInfo + for _, imp := range f.Imports { + var name string + if imp.Name != nil { + name = imp.Name.Name + } + if imp.Path.Value == `"C"` || name == "_" || name == "." { + continue + } + path := strings.Trim(imp.Path.Value, `"`) + imports = append(imports, &ImportInfo{ + Name: name, + ImportPath: path, + }) + } + return imports +} + +// findMissingImport searches pass's candidates for an import that provides +// pkg, containing all of syms. +func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { + for _, candidate := range p.candidates { + pkgInfo, ok := p.knownPackages[candidate.ImportPath] + if !ok { + continue + } + if p.importIdentifier(candidate) != pkg { + continue + } + + allFound := true + for right := range syms { + if !pkgInfo.exports[right] { + allFound = false + break + } + } + + if allFound { + return candidate + } + } + return nil +} + +// references is set of references found in a Go file. The first map key is the +// left hand side of a selector expression, the second key is the right hand +// side, and the value should always be true. +type references map[string]map[string]bool + +// A pass contains all the inputs and state necessary to fix a file's imports. +// It can be modified in some ways during use; see comments below. +type pass struct { + // Inputs. These must be set before a call to load, and not modified after. + fset *token.FileSet // fset used to parse f and its siblings. + f *ast.File // the file being fixed. + srcDir string // the directory containing f. + env *ProcessEnv // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + + // Intermediate state, generated by load. + existingImports map[string]*ImportInfo + allRefs references + missingRefs references + + // Inputs to fix. These can be augmented between successive fix calls. + lastTry bool // indicates that this is the last call and fix should clean up as best it can. + candidates []*ImportInfo // candidate imports in priority order. + knownPackages map[string]*packageInfo // information about all known packages. 
+} + +// loadPackageNames saves the package names for everything referenced by imports. +func (p *pass) loadPackageNames(imports []*ImportInfo) error { + if p.env.Debug { + p.env.Logf("loading package names for %v packages", len(imports)) + defer func() { + p.env.Logf("done loading package names for %v packages", len(imports)) + }() + } + var unknown []string + for _, imp := range imports { + if _, ok := p.knownPackages[imp.ImportPath]; ok { + continue + } + unknown = append(unknown, imp.ImportPath) + } + + names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir) + if err != nil { + return err + } + + for path, name := range names { + p.knownPackages[path] = &packageInfo{ + name: name, + exports: map[string]bool{}, + } + } + return nil +} + +// importIdentifier returns the identifier that imp will introduce. It will +// guess if the package name has not been loaded, e.g. because the source +// is not available. +func (p *pass) importIdentifier(imp *ImportInfo) string { + if imp.Name != "" { + return imp.Name + } + known := p.knownPackages[imp.ImportPath] + if known != nil && known.name != "" { + return known.name + } + return importPathToAssumedName(imp.ImportPath) +} + +// load reads in everything necessary to run a pass, and reports whether the +// file already has all the imports it needs. It fills in p.missingRefs with the +// file's missing symbols, if any, or removes unused imports if not. +func (p *pass) load() ([]*ImportFix, bool) { + p.knownPackages = map[string]*packageInfo{} + p.missingRefs = references{} + p.existingImports = map[string]*ImportInfo{} + + // Load basic information about the file in question. + p.allRefs = collectReferences(p.f) + + // Load stuff from other files in the same package: + // global variables so we know they don't need resolving, and imports + // that we might want to mimic. + globals := map[string]bool{} + for _, otherFile := range p.otherFiles { + // Don't load globals from files that are in the same directory + // but a different package. Using them to suggest imports is OK. + if p.f.Name.Name == otherFile.Name.Name { + addGlobals(otherFile, globals) + } + p.candidates = append(p.candidates, collectImports(otherFile)...) + } + + // Resolve all the import paths we've seen to package names, and store + // f's imports by the identifier they introduce. + imports := collectImports(p.f) + if p.loadRealPackageNames { + err := p.loadPackageNames(append(imports, p.candidates...)) + if err != nil { + if p.env.Debug { + p.env.Logf("loading package names: %v", err) + } + return nil, false + } + } + for _, imp := range imports { + p.existingImports[p.importIdentifier(imp)] = imp + } + + // Find missing references. + for left, rights := range p.allRefs { + if globals[left] { + continue + } + _, ok := p.existingImports[left] + if !ok { + p.missingRefs[left] = rights + continue + } + } + if len(p.missingRefs) != 0 { + return nil, false + } + + return p.fix() +} + +// fix attempts to satisfy missing imports using p.candidates. If it finds +// everything, or if p.lastTry is true, it updates fixes to add the imports it found, +// delete anything unused, and update import names, and returns true. +func (p *pass) fix() ([]*ImportFix, bool) { + // Find missing imports. 
+ var selected []*ImportInfo + for left, rights := range p.missingRefs { + if imp := p.findMissingImport(left, rights); imp != nil { + selected = append(selected, imp) + } + } + + if !p.lastTry && len(selected) != len(p.missingRefs) { + return nil, false + } + + // Found everything, or giving up. Add the new imports and remove any unused. + var fixes []*ImportFix + for _, imp := range p.existingImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } + + // An existing import may need to update its import name to be correct. + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } + } + + for _, imp := range selected { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: p.importSpecName(imp), + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: AddImport, + }) + } + + return fixes, true +} + +// importSpecName gets the import name of imp in the import spec. +// +// When the import identifier matches the assumed import name, the import name does +// not appear in the import spec. +func (p *pass) importSpecName(imp *ImportInfo) string { + // If we did not load the real package names, or the name is already set, + // we just return the existing name. + if !p.loadRealPackageNames || imp.Name != "" { + return imp.Name + } + + ident := p.importIdentifier(imp) + if ident == importPathToAssumedName(imp.ImportPath) { + return "" // ident not needed since the assumed and real names are the same. + } + return ident +} + +// apply will perform the fixes on f in order. +func apply(fset *token.FileSet, f *ast.File, fixes []*ImportFix) { + for _, fix := range fixes { + switch fix.FixType { + case DeleteImport: + astutil.DeleteNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case AddImport: + astutil.AddNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case SetImportName: + // Find the matching import path and change the name. + for _, spec := range f.Imports { + path := strings.Trim(spec.Path.Value, `"`) + if path == fix.StmtInfo.ImportPath { + spec.Name = &ast.Ident{ + Name: fix.StmtInfo.Name, + NamePos: spec.Pos(), + } + } + } + } + } +} + +// assumeSiblingImportsValid assumes that siblings' use of packages is valid, +// adding the exports they use. +func (p *pass) assumeSiblingImportsValid() { + for _, f := range p.otherFiles { + refs := collectReferences(f) + imports := collectImports(f) + importsByName := map[string]*ImportInfo{} + for _, imp := range imports { + importsByName[p.importIdentifier(imp)] = imp + } + for left, rights := range refs { + if imp, ok := importsByName[left]; ok { + if _, ok := stdlib[imp.ImportPath]; ok { + // We have the stdlib in memory; no need to guess. + rights = stdlib[imp.ImportPath] + } + p.addCandidate(imp, &packageInfo{ + // no name; we already know it. 
+ exports: rights, + }) + } + } + } +} + +// addCandidate adds a candidate import to p, and merges in the information +// in pkg. +func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { + p.candidates = append(p.candidates, imp) + if existing, ok := p.knownPackages[imp.ImportPath]; ok { + if existing.name == "" { + existing.name = pkg.name + } + for export := range pkg.exports { + existing.exports[export] = true + } + } else { + p.knownPackages[imp.ImportPath] = pkg + } +} + +// fixImports adds and removes imports from f so that all its references are +// satisfied and there are no unused imports. +// +// This is declared as a variable rather than a function so goimports can +// easily be extended by adding a file with an init function. +var fixImports = fixImportsDefault + +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { + fixes, err := getFixes(fset, f, filename, env) + if err != nil { + return err + } + apply(fset, f, fixes) + return err +} + +// getFixes gets the import fixes that need to be made to f in order to fix the imports. +// It does not modify the ast. +func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + if env.Debug { + env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + } + + // First pass: looking only at f, and using the naive algorithm to + // derive package names from import paths, see if the file is already + // complete. We can't add any imports yet, because we don't know + // if missing references are actually package vars. + p := &pass{fset: fset, f: f, srcDir: srcDir} + if fixes, done := p.load(); done { + return fixes, nil + } + + otherFiles := parseOtherFiles(fset, srcDir, filename) + + // Second pass: add information from other files in the same package, + // like their package vars and imports. + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + // Now we can try adding imports from the stdlib. + p.assumeSiblingImportsValid() + addStdlibCandidates(p, p.missingRefs) + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Third pass: get real package names where we had previously used + // the naive algorithm. This is the first step that will use the + // environment, so we provide it here for the first time. + p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p.loadRealPackageNames = true + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + addStdlibCandidates(p, p.missingRefs) + p.assumeSiblingImportsValid() + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Go look for candidates in $GOPATH, etc. We don't necessarily load + // the real exports of sibling imports, so keep assuming their contents. + if err := addExternalCandidates(p, p.missingRefs, filename); err != nil { + return nil, err + } + + p.lastTry = true + fixes, _ := p.fix() + return fixes, nil +} + +// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. +func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) { + // TODO(suzmue): scan for additional candidates and filter out + // current package. + + // Get the stdlib candidates and sort by import path. 
+ var paths []string + for importPath := range stdlib { + paths = append(paths, importPath) + } + sort.Strings(paths) + + var imports []ImportFix + for _, importPath := range paths { + imports = append(imports, ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: importPath, + }, + IdentName: path.Base(importPath), + FixType: AddImport, + }) + } + return imports, nil +} + +// ProcessEnv contains environment variables and settings that affect the use of +// the go command, the go/build package, etc. +type ProcessEnv struct { + LocalPrefix string + Debug bool + + // If non-empty, these will be used instead of the + // process-wide values. + GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string + WorkingDir string + + // If true, use go/packages regardless of the environment. + ForceGoPackages bool + + // Logf is the default logger for the ProcessEnv. + Logf func(format string, args ...interface{}) + + resolver Resolver +} + +func (e *ProcessEnv) env() []string { + env := os.Environ() + add := func(k, v string) { + if v != "" { + env = append(env, k+"="+v) + } + } + add("GOPATH", e.GOPATH) + add("GOROOT", e.GOROOT) + add("GO111MODULE", e.GO111MODULE) + add("GOPROXY", e.GOPROXY) + add("GOFLAGS", e.GOFLAGS) + add("GOSUMDB", e.GOSUMDB) + if e.WorkingDir != "" { + add("PWD", e.WorkingDir) + } + return env +} + +func (e *ProcessEnv) GetResolver() Resolver { + if e.resolver != nil { + return e.resolver + } + if e.ForceGoPackages { + e.resolver = &goPackagesResolver{env: e} + return e.resolver + } + + out, err := e.invokeGo("env", "GOMOD") + if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 { + e.resolver = &gopathResolver{env: e} + return e.resolver + } + e.resolver = &ModuleResolver{env: e} + return e.resolver +} + +func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config { + return &packages.Config{ + Mode: mode, + Dir: e.WorkingDir, + Env: e.env(), + } +} + +func (e *ProcessEnv) buildContext() *build.Context { + ctx := build.Default + ctx.GOROOT = e.GOROOT + ctx.GOPATH = e.GOPATH + return &ctx +} + +func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) { + cmd := exec.Command("go", args...) + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + cmd.Stdout = stdout + cmd.Stderr = stderr + cmd.Env = e.env() + cmd.Dir = e.WorkingDir + + if e.Debug { + defer func(start time.Time) { e.Logf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + } + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr) + } + return stdout, nil +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) +} + +func addStdlibCandidates(pass *pass, refs references) { + add := func(pkg string) { + pass.addCandidate( + &ImportInfo{ImportPath: pkg}, + &packageInfo{name: path.Base(pkg), exports: stdlib[pkg]}) + } + for left := range refs { + if left == "rand" { + // Make sure we try crypto/rand before math/rand. + add("crypto/rand") + add("math/rand") + continue + } + for importPath := range stdlib { + if path.Base(importPath) == left { + add(importPath) + } + } + } +} + +// A Resolver does the build-system-specific parts of goimports. 
+type Resolver interface { + // loadPackageNames loads the package names in importPaths. + loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + // scan finds (at least) the packages satisfying refs. The returned slice is unordered. + scan(refs references) ([]*pkg, error) + // loadExports returns the set of exported symbols in the package at dir. + // It returns an error if the package name in dir does not match expectPackage. + // loadExports may be called concurrently. + loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) +} + +// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages. +type goPackagesResolver struct { + env *ProcessEnv +} + +func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + if len(importPaths) == 0 { + return nil, nil + } + cfg := r.env.newPackagesConfig(packages.LoadFiles) + pkgs, err := packages.Load(cfg, importPaths...) + if err != nil { + return nil, err + } + names := map[string]string{} + for _, pkg := range pkgs { + names[VendorlessPath(pkg.PkgPath)] = pkg.Name + } + // We may not have found all the packages. Guess the rest. + for _, path := range importPaths { + if _, ok := names[path]; ok { + continue + } + names[path] = importPathToAssumedName(path) + } + return names, nil + +} + +func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) { + var loadQueries []string + for pkgName := range refs { + loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) + } + sort.Strings(loadQueries) + cfg := r.env.newPackagesConfig(packages.LoadFiles) + goPackages, err := packages.Load(cfg, loadQueries...) + if err != nil { + return nil, err + } + + var scan []*pkg + for _, goPackage := range goPackages { + scan = append(scan, &pkg{ + dir: filepath.Dir(goPackage.CompiledGoFiles[0]), + importPathShort: VendorlessPath(goPackage.PkgPath), + goPackage: goPackage, + }) + } + return scan, nil +} + +func (r *goPackagesResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) { + if pkg.goPackage == nil { + return nil, fmt.Errorf("goPackage not set") + } + exports := map[string]bool{} + fset := token.NewFileSet() + for _, fname := range pkg.goPackage.CompiledGoFiles { + f, err := parser.ParseFile(fset, fname, nil, 0) + if err != nil { + return nil, fmt.Errorf("parsing %s: %v", fname, err) + } + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports[name] = true + } + } + } + return exports, nil +} + +func addExternalCandidates(pass *pass, refs references, filename string) error { + dirScan, err := pass.env.GetResolver().scan(refs) + if err != nil { + return err + } + + // Search for imports matching potential package references. + type result struct { + imp *ImportInfo + pkg *packageInfo + } + results := make(chan result, len(refs)) + + ctx, cancel := context.WithCancel(context.TODO()) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + var ( + firstErr error + firstErrOnce sync.Once + ) + for pkgName, symbols := range refs { + wg.Add(1) + go func(pkgName string, symbols map[string]bool) { + defer wg.Done() + + found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename) + + if err != nil { + firstErrOnce.Do(func() { + firstErr = err + cancel() + }) + return + } + + if found == nil { + return // No matching package. 
+ } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + + pkg := &packageInfo{ + name: pkgName, + exports: symbols, + } + results <- result{imp, pkg} + }(pkgName, symbols) + } + go func() { + wg.Wait() + close(results) + }() + + for result := range results { + pass.addCandidate(result.imp, result.pkg) + } + return firstErr +} + +// notIdentifier reports whether ch is an invalid identifier character. +func notIdentifier(ch rune) bool { + return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || + '0' <= ch && ch <= '9' || + ch == '_' || + ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) +} + +// importPathToAssumedName returns the assumed package name of an import path. +// It does this using only string parsing of the import path. +// It picks the last element of the path that does not look like a major +// version, and then picks the valid identifier off the start of that element. +// It is used to determine if a local rename should be added to an import for +// clarity. +// This function could be moved to a standard package and exported if we want +// for use in other tools. +func importPathToAssumedName(importPath string) string { + base := path.Base(importPath) + if strings.HasPrefix(base, "v") { + if _, err := strconv.Atoi(base[1:]); err == nil { + dir := path.Dir(importPath) + if dir != "." { + base = path.Base(dir) + } + } + } + base = strings.TrimPrefix(base, "go-") + if i := strings.IndexFunc(base, notIdentifier); i >= 0 { + base = base[:i] + } + return base +} + +// gopathResolver implements resolver for GOPATH workspaces. +type gopathResolver struct { + env *ProcessEnv +} + +func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + for _, path := range importPaths { + names[path] = importPathToName(r.env, path, srcDir) + } + return names, nil +} + +// importPathToName finds out the actual package name, as declared in its .go files. +// If there's a problem, it returns "". +func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) { + // Fast path for standard library without going to disk. + if _, ok := stdlib[importPath]; ok { + return path.Base(importPath) // stdlib packages always match their paths. + } + + buildPkg, err := env.buildContext().Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "" + } + pkgName, err := packageDirToName(buildPkg.Dir) + if err != nil { + return "" + } + return pkgName +} + +// packageDirToName is a faster version of build.Import if +// the only thing desired is the package name. Given a directory, +// packageDirToName then only parses one file in the package, +// trusting that the files in the directory are consistent. 
+func packageDirToName(dir string) (packageName string, err error) { + d, err := os.Open(dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. + continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +type pkg struct { + goPackage *packages.Package + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") +} + +type pkgDistance struct { + pkg *pkg + distance int // relative distance to target +} + +// byDistanceOrImportPathShortLength sorts by relative distance breaking ties +// on the short import path length and then the import string itself. +type byDistanceOrImportPathShortLength []pkgDistance + +func (s byDistanceOrImportPathShortLength) Len() int { return len(s) } +func (s byDistanceOrImportPathShortLength) Less(i, j int) bool { + di, dj := s[i].distance, s[j].distance + if di == -1 { + return false + } + if dj == -1 { + return true + } + if di != dj { + return di < dj + } + + vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort + if len(vi) != len(vj) { + return len(vi) < len(vj) + } + return vi < vj +} +func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func distance(basepath, targetpath string) int { + p, err := filepath.Rel(basepath, targetpath) + if err != nil { + return -1 + } + if p == "." { + return 0 + } + return strings.Count(p, string(filepath.Separator)) + 1 +} + +func (r *gopathResolver) scan(_ references) ([]*pkg, error) { + dupCheck := make(map[string]bool) + var result []*pkg + + var mu sync.Mutex + + add := func(root gopathwalk.Root, dir string) { + mu.Lock() + defer mu.Unlock() + + if _, dup := dupCheck[dir]; dup { + return + } + dupCheck[dir] = true + importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) + result = append(result, &pkg{ + importPathShort: VendorlessPath(importpath), + dir: dir, + }) + } + gopathwalk.Walk(gopathwalk.SrcDirsRoots(r.env.buildContext()), add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) + return result, nil +} + +func (r *gopathResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) { + return loadExportsFromFiles(ctx, r.env, expectPackage, pkg.dir) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + // Devendorize for use in import statement. 
+ if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, expectPackage string, dir string) (map[string]bool, error) { + exports := make(map[string]bool) + + // Look for non-test, buildable .go files which could provide exports. + all, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + var files []os.FileInfo + for _, fi := range all { + name := fi.Name() + if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") { + continue + } + match, err := env.buildContext().MatchFile(dir, fi.Name()) + if err != nil || !match { + continue + } + files = append(files, fi) + } + + if len(files) == 0 { + return nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir) + } + + fset := token.NewFileSet() + for _, fi := range files { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + fullFile := filepath.Join(dir, fi.Name()) + f, err := parser.ParseFile(fset, fullFile, nil, 0) + if err != nil { + return nil, fmt.Errorf("parsing %s: %v", fullFile, err) + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by MatchFile above. + continue + } + if pkgName != expectPackage { + return nil, fmt.Errorf("scan of dir %v is not expected package %v (actually %v)", dir, expectPackage, pkgName) + } + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports[name] = true + } + } + } + + if env.Debug { + exportList := make([]string, 0, len(exports)) + for k := range exports { + exportList = append(exportList, k) + } + sort.Strings(exportList) + env.Logf("loaded exports in dir %v (package %v): %v", dir, expectPackage, strings.Join(exportList, ", ")) + } + return exports, nil +} + +// findImport searches for a package with the given symbols. +// If no package is found, findImport returns ("", false, nil) +func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { + pkgDir, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + pkgDir = filepath.Dir(pkgDir) + + // Find candidate packages, looking only at their directory names first. + var candidates []pkgDistance + for _, pkg := range dirScan { + if pkg.dir == pkgDir && pass.f.Name.Name == pkgName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + continue + } + if pkgIsCandidate(filename, pkgName, pkg) { + candidates = append(candidates, pkgDistance{ + pkg: pkg, + distance: distance(pkgDir, pkg.dir), + }) + } + } + + // Sort the candidates by their import package length, + // assuming that shorter package names are better than long + // ones. Note that this sorts by the de-vendored name, so + // there's no "penalty" for vendoring. + sort.Sort(byDistanceOrImportPathShortLength(candidates)) + if pass.env.Debug { + for i, c := range candidates { + pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + } + } + + // Collect exports for packages with matching names. 
+ + rescv := make([]chan *pkg, len(candidates)) + for i := range candidates { + rescv[i] = make(chan *pkg, 1) + } + const maxConcurrentPackageImport = 4 + loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + + ctx, cancel := context.WithCancel(ctx) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + for i, c := range candidates { + select { + case loadExportsSem <- struct{}{}: + case <-ctx.Done(): + return + } + + wg.Add(1) + go func(c pkgDistance, resc chan<- *pkg) { + defer func() { + <-loadExportsSem + wg.Done() + }() + + if pass.env.Debug { + pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + } + exports, err := pass.env.GetResolver().loadExports(ctx, pkgName, c.pkg) + if err != nil { + if pass.env.Debug { + pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + } + resc <- nil + return + } + + // If it doesn't have the right + // symbols, send nil to mean no match. + for symbol := range symbols { + if !exports[symbol] { + resc <- nil + return + } + } + resc <- c.pkg + }(c, rescv[i]) + } + }() + + for _, resc := range rescv { + pkg := <-resc + if pkg == nil { + continue + } + return pkg, nil + } + return nil, nil +} + +// pkgIsCandidate reports whether pkg is a candidate for satisfying the +// finding which package pkgIdent in the file named by filename is trying +// to refer to. +// +// This check is purely lexical and is meant to be as fast as possible +// because it's run over all $GOPATH directories to filter out poor +// candidates in order to limit the CPU and I/O later parsing the +// exports in candidate packages. +// +// filename is the file being formatted. +// pkgIdent is the package being searched for, like "client" (if +// searching for "client.New") +func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { + // Check "internal" and "vendor" visibility: + if !canUse(filename, pkg.dir) { + return false + } + + // Speed optimization to minimize disk I/O: + // the last two components on disk must contain the + // package name somewhere. + // + // This permits mismatch naming like directory + // "go-foo" being package "foo", or "pkg.v3" being "pkg", + // or directory "google.golang.org/api/cloudbilling/v1" + // being package "cloudbilling", but doesn't + // permit a directory "foo" to be package + // "bar", which is strongly discouraged + // anyway. There's no reason goimports needs + // to be slow just to accommodate that. + lastTwo := lastTwoComponents(pkg.importPathShort) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } + + return false +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// canUse reports whether the package in dir is usable from filename, +// respecting the Go "internal" and "vendor" visibility rules. 
+func canUse(filename, dir string) bool { + // Fast path check, before any allocations. If it doesn't contain vendor + // or internal, it's not tricky: + // Note that this can false-negative on directories like "notinternal", + // but we check it correctly below. This is just a fast path. + if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. + // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. + absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go new file mode 100644 index 00000000..2c074cb2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -0,0 +1,388 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mkstdlib.go + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/build" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "io/ioutil" + "log" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" +) + +// Options is golang.org/x/tools/imports.Options with extra internal-only options. +type Options struct { + Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Process implements golang.org/x/tools/imports.Process with explicit context in env. 
+func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { + src, opt, err = initialize(filename, src, opt) + if err != nil { + return nil, err + } + + fileSet := token.NewFileSet() + file, adjust, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + if !opt.FormatOnly { + if err := fixImports(fileSet, file, filename, opt.Env); err != nil { + return nil, err + } + } + return formatFile(fileSet, file, src, adjust, opt) +} + +// FixImports returns a list of fixes to the imports that, when applied, +// will leave the imports in the same state as Process. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + src, opt, err = initialize(filename, src, opt) + if err != nil { + return nil, err + } + + fileSet := token.NewFileSet() + file, _, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + return getFixes(fileSet, file, filename, opt.Env) +} + +// ApplyFix will apply all of the fixes to the file and format it. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) { + src, opt, err = initialize(filename, src, opt) + if err != nil { + return nil, err + } + + fileSet := token.NewFileSet() + file, adjust, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + // Apply the fixes to the file. + apply(fileSet, file, fixes) + + return formatFile(fileSet, file, src, adjust, opt) +} + +// GetAllCandidates gets all of the standard library candidate packages to import in +// sorted order on import path. +func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) { + _, opt, err = initialize(filename, []byte{}, opt) + if err != nil { + return nil, err + } + return getAllCandidates(filename, opt.Env) +} + +// initialize sets the values for opt and src. +// If they are provided, they are not changed. Otherwise opt is set to the +// default values and src is read from the file system. +func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) { + // Use defaults if opt is nil. + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + + // Set the env if the user has not provided it. + if opt.Env == nil { + opt.Env = &ProcessEnv{ + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + } + } + + // Set the logger if the user has not provided it. + if opt.Env.Logf == nil { + opt.Env.Logf = log.Printf + } + + if src == nil { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, nil, err + } + src = b + } + + return src, opt, nil +} + +func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { + mergeImports(opt.Env, fileSet, file) + sortImports(opt.Env, fileSet, file) + imps := astutil.Imports(fileSet, file) + var spacesBefore []string // import paths we need spaces before + for _, impSection := range imps { + // Within each block of contiguous imports, see if any + // import lines are in different group numbers. If so, + // we'll need to put a space between them so it's + // compatible with gofmt. 
+ lastGroup := -1 + for _, importSpec := range impSection { + importPath, _ := strconv.Unquote(importSpec.Path.Value) + groupNum := importGroup(opt.Env, importPath) + if groupNum != lastGroup && lastGroup != -1 { + spacesBefore = append(spacesBefore, importPath) + } + lastGroup = groupNum + } + + } + + printerMode := printer.UseSpaces + if opt.TabIndent { + printerMode |= printer.TabIndent + } + printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} + + var buf bytes.Buffer + err := printConfig.Fprint(&buf, fileSet, file) + if err != nil { + return nil, err + } + out := buf.Bytes() + if adjust != nil { + out = adjust(src, out) + } + if len(spacesBefore) > 0 { + out, err = addImportSpaces(bytes.NewReader(out), spacesBefore) + if err != nil { + return nil, err + } + } + + out, err = format.Source(out) + if err != nil { + return nil, err + } + return out, nil +} + +// parse parses src, which was read from filename, +// as a Go source file or statement list. +func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + + // Try as whole source file. + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err == nil { + return file, nil, nil + } + // If the error is that the source file didn't begin with a + // package line and we accept fragmented input, fall through to + // try as a source fragment. Stop and return on any other error. + if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + return nil, nil, err + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that parse errors are on + // the correct line. + const prefix = "package main;" + psrc := append([]byte(prefix), src...) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + // Gofmt will turn the ; into a \n. + // Do that ourselves now and update the file contents, + // so that positions and line numbers are correct going forward. + psrc[len(prefix)-1] = '\n' + fset.File(file.Package).SetLinesForContent(psrc) + + // If a main function exists, we will assume this is a main + // package and leave the file. + if containsMainFunc(file) { + return file, nil, nil + } + + adjust := func(orig, src []byte) []byte { + // Remove the package clause. + src = src[len(prefix):] + return matchSpace(orig, src) + } + return file, adjust, nil + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return nil, nil, err + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. + fsrc := append(append([]byte("package p; func _() {"), src...), '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + adjust := func(orig, src []byte) []byte { + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + src = src[len("package p\n\nfunc _() {"):] + src = src[:len(src)-len("}\n")] + // Gofmt has also indented the function body one level. 
+ // Remove that indent. + src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + return matchSpace(orig, src) + } + return file, adjust, nil + } + + // Failed, and out of options. + return nil, nil, err +} + +// containsMainFunc checks if a file contains a function declaration with the +// function signature 'func main()' +func containsMainFunc(file *ast.File) bool { + for _, decl := range file.Decls { + if f, ok := decl.(*ast.FuncDecl); ok { + if f.Name.Name != "main" { + continue + } + + if len(f.Type.Params.List) != 0 { + continue + } + + if f.Type.Results != nil && len(f.Type.Results.List) != 0 { + continue + } + + return true + } + } + + return false +} + +func cutSpace(b []byte) (before, middle, after []byte) { + i := 0 + for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { + i++ + } + j := len(b) + for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { + j-- + } + if i <= j { + return b[:i], b[i:j], b[j:] + } + return nil, nil, b[j:] +} + +// matchSpace reformats src to use the same space context as orig. +// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src. +// 2) matchSpace copies the indentation of the first non-blank line in orig +// to every non-blank line in src. +// 3) matchSpace copies the trailing space from orig and uses it in place +// of src's trailing space. +func matchSpace(orig []byte, src []byte) []byte { + before, _, after := cutSpace(orig) + i := bytes.LastIndex(before, []byte{'\n'}) + before, indent := before[:i+1], before[i+1:] + + _, src, _ = cutSpace(src) + + var b bytes.Buffer + b.Write(before) + for len(src) > 0 { + line := src + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, src = line[:i+1], line[i+1:] + } else { + src = nil + } + if len(line) > 0 && line[0] != '\n' { // not blank + b.Write(indent) + } + b.Write(line) + } + b.Write(after) + return b.Bytes() +} + +var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`) + +func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) { + var out bytes.Buffer + in := bufio.NewReader(r) + inImports := false + done := false + for { + s, err := in.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + if !inImports && !done && strings.HasPrefix(s, "import") { + inImports = true + } + if inImports && (strings.HasPrefix(s, "var") || + strings.HasPrefix(s, "func") || + strings.HasPrefix(s, "const") || + strings.HasPrefix(s, "type")) { + done = true + inImports = false + } + if inImports && len(breaks) > 0 { + if m := impLine.FindStringSubmatch(s); m != nil { + if m[1] == breaks[0] { + out.WriteByte('\n') + breaks = breaks[1:] + } + } + } + + fmt.Fprint(&out, s) + } + return out.Bytes(), nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/mkindex.go b/vendor/golang.org/x/tools/internal/imports/mkindex.go new file mode 100644 index 00000000..ef8c0d28 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mkindex.go @@ -0,0 +1,173 @@ +// +build ignore + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command mkindex creates the file "pkgindex.go" containing an index of the Go +// standard library. The file is intended to be built as part of the imports +// package, so that the package may be used in environments where a GOROOT is +// not available (such as App Engine). 
+package imports + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/format" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "strings" +) + +var ( + pkgIndex = make(map[string][]pkg) + exports = make(map[string]map[string]bool) +) + +func main() { + // Don't use GOPATH. + ctx := build.Default + ctx.GOPATH = "" + + // Populate pkgIndex global from GOROOT. + for _, path := range ctx.SrcDirs() { + f, err := os.Open(path) + if err != nil { + log.Print(err) + continue + } + children, err := f.Readdir(-1) + f.Close() + if err != nil { + log.Print(err) + continue + } + for _, child := range children { + if child.IsDir() { + loadPkg(path, child.Name()) + } + } + } + // Populate exports global. + for _, ps := range pkgIndex { + for _, p := range ps { + e := loadExports(p.dir) + if e != nil { + exports[p.dir] = e + } + } + } + + // Construct source file. + var buf bytes.Buffer + fmt.Fprint(&buf, pkgIndexHead) + fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex) + fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports) + src := buf.Bytes() + + // Replace main.pkg type name with pkg. + src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1) + // Replace actual GOROOT with "/go". + src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1) + // Add some line wrapping. + src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1) + src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1) + + var err error + src, err = format.Source(src) + if err != nil { + log.Fatal(err) + } + + // Write out source file. + err = ioutil.WriteFile("pkgindex.go", src, 0644) + if err != nil { + log.Fatal(err) + } +} + +const pkgIndexHead = `package imports + +func init() { + pkgIndexOnce.Do(func() { + pkgIndex.m = pkgIndexMaster + }) + loadExports = func(dir string) map[string]bool { + return exportsMaster[dir] + } +} +` + +type pkg struct { + importpath string // full pkg import path, e.g. "net/http" + dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt" +} + +var fset = token.NewFileSet() + +func loadPkg(root, importpath string) { + shortName := path.Base(importpath) + if shortName == "testdata" { + return + } + + dir := filepath.Join(root, importpath) + pkgIndex[shortName] = append(pkgIndex[shortName], pkg{ + importpath: importpath, + dir: dir, + }) + + pkgDir, err := os.Open(dir) + if err != nil { + return + } + children, err := pkgDir.Readdir(-1) + pkgDir.Close() + if err != nil { + return + } + for _, child := range children { + name := child.Name() + if name == "" { + continue + } + if c := name[0]; c == '.' 
|| ('0' <= c && c <= '9') { + continue + } + if child.IsDir() { + loadPkg(root, filepath.Join(importpath, name)) + } + } +} + +func loadExports(dir string) map[string]bool { + exports := make(map[string]bool) + buildPkg, err := build.ImportDir(dir, 0) + if err != nil { + if strings.Contains(err.Error(), "no buildable Go source files in") { + return nil + } + log.Printf("could not import %q: %v", dir, err) + return nil + } + for _, file := range buildPkg.GoFiles { + f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0) + if err != nil { + log.Printf("could not parse %q: %v", file, err) + continue + } + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports[name] = true + } + } + } + return exports +} diff --git a/vendor/golang.org/x/tools/internal/imports/mkstdlib.go b/vendor/golang.org/x/tools/internal/imports/mkstdlib.go new file mode 100644 index 00000000..fa69eebb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mkstdlib.go @@ -0,0 +1,128 @@ +// +build ignore + +// mkstdlib generates the zstdlib.go file, containing the Go standard +// library API symbols. It's baked into the binary to avoid scanning +// GOPATH in the common case. +package main + +import ( + "bufio" + "bytes" + "fmt" + "go/format" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "sort" +) + +func mustOpen(name string) io.Reader { + f, err := os.Open(name) + if err != nil { + log.Fatal(err) + } + return f +} + +func api(base string) string { + return filepath.Join(runtime.GOROOT(), "api", base) +} + +var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`) + +var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true} + +func main() { + var buf bytes.Buffer + outf := func(format string, args ...interface{}) { + fmt.Fprintf(&buf, format, args...) + } + outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n") + outf("package imports\n") + outf("var stdlib = map[string]map[string]bool{\n") + f := io.MultiReader( + mustOpen(api("go1.txt")), + mustOpen(api("go1.1.txt")), + mustOpen(api("go1.2.txt")), + mustOpen(api("go1.3.txt")), + mustOpen(api("go1.4.txt")), + mustOpen(api("go1.5.txt")), + mustOpen(api("go1.6.txt")), + mustOpen(api("go1.7.txt")), + mustOpen(api("go1.8.txt")), + mustOpen(api("go1.9.txt")), + mustOpen(api("go1.10.txt")), + mustOpen(api("go1.11.txt")), + mustOpen(api("go1.12.txt")), + mustOpen(api("go1.13.txt")), + + // The API of the syscall/js package needs to be computed explicitly, + // because it's not included in the GOROOT/api/go1.*.txt files at this time. 
+ syscallJSAPI(), + ) + sc := bufio.NewScanner(f) + + pkgs := map[string]map[string]bool{ + "unsafe": unsafeSyms, + } + paths := []string{"unsafe"} + + for sc.Scan() { + l := sc.Text() + if m := sym.FindStringSubmatch(l); m != nil { + path, sym := m[1], m[2] + + if _, ok := pkgs[path]; !ok { + pkgs[path] = map[string]bool{} + paths = append(paths, path) + } + pkgs[path][sym] = true + } + } + if err := sc.Err(); err != nil { + log.Fatal(err) + } + sort.Strings(paths) + for _, path := range paths { + outf("\t%q: map[string]bool{\n", path) + pkg := pkgs[path] + var syms []string + for sym := range pkg { + syms = append(syms, sym) + } + sort.Strings(syms) + for _, sym := range syms { + outf("\t\t%q: true,\n", sym) + } + outf("},\n") + } + outf("}\n") + fmtbuf, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666) + if err != nil { + log.Fatal(err) + } +} + +// syscallJSAPI returns the API of the syscall/js package. +// It's computed from the contents of $(go env GOROOT)/src/syscall/js. +func syscallJSAPI() io.Reader { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js") + out, err := cmd.Output() + if err != nil { + log.Fatalln(err) + } + return bytes.NewReader(out) +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go new file mode 100644 index 00000000..387799bd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -0,0 +1,500 @@ +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/module" +) + +// ModuleResolver implements resolver for modules using the go command as little +// as feasible. +type ModuleResolver struct { + env *ProcessEnv + moduleCacheDir string + + Initialized bool + Main *ModuleJSON + ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... + ModsByDir []*ModuleJSON // ...or Dir. + + // moduleCacheInfo stores information about the module cache. + moduleCacheInfo *moduleCacheInfo +} + +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file for this module, if any + Error *ModuleErrorJSON // error loading module +} + +type ModuleErrorJSON struct { + Err string // the error itself +} + +func (r *ModuleResolver) init() error { + if r.Initialized { + return nil + } + stdout, err := r.env.invokeGo("list", "-m", "-json", "...") + if err != nil { + return err + } + for dec := json.NewDecoder(stdout); dec.More(); { + mod := &ModuleJSON{} + if err := dec.Decode(mod); err != nil { + return err + } + if mod.Dir == "" { + if r.env.Debug { + r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) + } + // Can't do anything with a module that's not downloaded. 
+ continue + } + r.ModsByModPath = append(r.ModsByModPath, mod) + r.ModsByDir = append(r.ModsByDir, mod) + if mod.Main { + r.Main = mod + } + } + + sort.Slice(r.ModsByModPath, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.ModsByModPath[x].Path, "/") + } + return count(j) < count(i) // descending order + }) + sort.Slice(r.ModsByDir, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.ModsByDir[x].Dir, "/") + } + return count(j) < count(i) // descending order + }) + + if r.moduleCacheInfo == nil { + r.moduleCacheInfo = &moduleCacheInfo{ + modCacheDirInfo: make(map[string]*directoryPackageInfo), + } + } + + r.Initialized = true + return nil +} + +// findPackage returns the module and directory that contains the package at +// the given import path, or returns nil, "" if no module is in scope. +func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { + // This can't find packages in the stdlib, but that's harmless for all + // the existing code paths. + for _, m := range r.ModsByModPath { + if !strings.HasPrefix(importPath, m.Path) { + continue + } + pathInModule := importPath[len(m.Path):] + pkgDir := filepath.Join(m.Dir, pathInModule) + if r.dirIsNestedModule(pkgDir, m) { + continue + } + + if info, ok := r.moduleCacheInfo.Load(pkgDir); ok { + if packageScanned, err := info.reachedStatus(directoryScanned); packageScanned { + if err != nil { + // There was some error with scanning this directory. + // It does not contain a valid package. + continue + } + return m, pkgDir + } + } + + pkgFiles, err := ioutil.ReadDir(pkgDir) + if err != nil { + continue + } + + // A module only contains a package if it has buildable go + // files in that directory. If not, it could be provided by an + // outer module. See #29736. + for _, fi := range pkgFiles { + if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok { + return m, pkgDir + } + } + } + return nil, "" +} + +// findModuleByDir returns the module that contains dir, or nil if no such +// module is in scope. +func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { + // This is quite tricky and may not be correct. dir could be: + // - a package in the main module. + // - a replace target underneath the main module's directory. + // - a nested module in the above. + // - a replace target somewhere totally random. + // - a nested module in the above. + // - in the mod cache. + // - in /vendor/ in -mod=vendor mode. + // - nested module? Dunno. + // Rumor has it that replace targets cannot contain other replace targets. + for _, m := range r.ModsByDir { + if !strings.HasPrefix(dir, m.Dir) { + continue + } + + if r.dirIsNestedModule(dir, m) { + continue + } + + return m + } + return nil +} + +// dirIsNestedModule reports if dir is contained in a nested module underneath +// mod, not actually in mod. +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool { + if !strings.HasPrefix(dir, mod.Dir) { + return false + } + if r.dirInModuleCache(dir) { + // Nested modules in the module cache are pruned, + // so it cannot be a nested module. 
+ return false + } + mf := r.findModFile(dir) + if mf == "" { + return false + } + return filepath.Dir(mf) != mod.Dir +} + +func (r *ModuleResolver) findModFile(dir string) string { + if r.dirInModuleCache(dir) { + matches := modCacheRegexp.FindStringSubmatch(dir) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + return filepath.Join(dir[:index], matches[1]+"@"+matches[2], "go.mod") + } + for { + f := filepath.Join(dir, "go.mod") + info, err := os.Stat(f) + if err == nil && !info.IsDir() { + return f + } + d := filepath.Dir(dir) + if len(d) >= len(dir) { + return "" // reached top of file system, no go.mod + } + dir = d + } +} + +func (r *ModuleResolver) dirInModuleCache(dir string) bool { + if r.moduleCacheDir == "" { + return false + } + return strings.HasPrefix(dir, r.moduleCacheDir) +} + +func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + if err := r.init(); err != nil { + return nil, err + } + names := map[string]string{} + for _, path := range importPaths { + _, packageDir := r.findPackage(path) + if packageDir == "" { + continue + } + name, err := packageDirToName(packageDir) + if err != nil { + continue + } + names[path] = name + } + return names, nil +} + +func (r *ModuleResolver) scan(_ references) ([]*pkg, error) { + if err := r.init(); err != nil { + return nil, err + } + + // Walk GOROOT, GOPATH/pkg/mod, and the main module. + roots := []gopathwalk.Root{ + {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + } + if r.Main != nil { + roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule}) + } + if r.moduleCacheDir == "" { + r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") + } + roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + + // Walk replace targets, just in case they're not in any of the above. + for _, mod := range r.ModsByModPath { + if mod.Replace != nil { + roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + } + } + + var result []*pkg + dupCheck := make(map[string]bool) + var mu sync.Mutex + + // Packages in the module cache are immutable. If we have + // already seen this package on a previous scan of the module + // cache, return that result. + skip := func(root gopathwalk.Root, dir string) bool { + mu.Lock() + defer mu.Unlock() + // If we have already processed this directory on this walk, skip it. + if _, dup := dupCheck[dir]; dup { + return true + } + + // If we have saved this directory information, skip it. + info, ok := r.moduleCacheInfo.Load(dir) + if !ok { + return false + } + // This directory can be skipped as long as we have already scanned it. + // Packages with errors will continue to have errors, so there is no need + // to rescan them. + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + add := func(root gopathwalk.Root, dir string) { + mu.Lock() + defer mu.Unlock() + if _, dup := dupCheck[dir]; dup { + return + } + + info, err := r.scanDirForPackage(root, dir) + if err != nil { + return + } + if root.Type == gopathwalk.RootModuleCache { + // Save this package information in the cache and return. + // Packages from the module cache are added after Walk. + r.moduleCacheInfo.Store(dir, info) + return + } + + // Skip this package if there was an error loading package info. 
+ if info.err != nil { + return + } + + // The rest of this function canonicalizes the packages using the results + // of initializing the resolver from 'go list -m'. + res, err := r.canonicalize(root.Type, info.nonCanonicalImportPath, info.dir, info.needsReplace) + if err != nil { + return + } + + result = append(result, res) + } + + gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) + + // Add the packages from the modules in the mod cache that were skipped. + for _, dir := range r.moduleCacheInfo.Keys() { + info, ok := r.moduleCacheInfo.Load(dir) + if !ok { + continue + } + + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + continue + } + + res, err := r.canonicalize(gopathwalk.RootModuleCache, info.nonCanonicalImportPath, info.dir, info.needsReplace) + if err != nil { + continue + } + result = append(result, res) + } + + return result, nil +} + +// canonicalize gets the result of canonicalizing the packages using the results +// of initializing the resolver from 'go list -m'. +func (r *ModuleResolver) canonicalize(rootType gopathwalk.RootType, importPath, dir string, needsReplace bool) (res *pkg, err error) { + // Packages in GOROOT are already canonical, regardless of the std/cmd modules. + if rootType == gopathwalk.RootGOROOT { + return &pkg{ + importPathShort: importPath, + dir: dir, + }, nil + } + + // Check if the directory is underneath a module that's in scope. + if mod := r.findModuleByDir(dir); mod != nil { + // It is. If dir is the target of a replace directive, + // our guessed import path is wrong. Use the real one. + if mod.Dir == dir { + importPath = mod.Path + } else { + dirInMod := dir[len(mod.Dir)+len("/"):] + importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) + } + } else if needsReplace { + return nil, fmt.Errorf("needed this package to be in scope: %s", dir) + } + + // We may have discovered a package that has a different version + // in scope already. Canonicalize to that one if possible. + if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { + dir = canonicalDir + } + return &pkg{ + importPathShort: VendorlessPath(importPath), + dir: dir, + }, nil +} + +func (r *ModuleResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) { + if err := r.init(); err != nil { + return nil, err + } + return loadExportsFromFiles(ctx, r.env, expectPackage, pkg.dir) +} + +func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) (directoryPackageInfo, error) { + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + importPath := filepath.ToSlash(subdir) + if strings.HasPrefix(importPath, "vendor/") { + // Ignore vendor dirs. If -mod=vendor is on, then things + // should mostly just work, but when it's not vendor/ + // is a mess. There's no easy way to tell if it's on. + // We can still find things in the mod cache and + // map them into /vendor when -mod=vendor is on. 
+ return directoryPackageInfo{}, fmt.Errorf("vendor directory") + } + switch root.Type { + case gopathwalk.RootCurrentModule: + importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir)) + case gopathwalk.RootModuleCache: + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + }, nil + } + modPath, err := module.DecodePath(filepath.ToSlash(matches[1])) + if err != nil { + if r.env.Debug { + r.env.Logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + }, nil + } + importPath = path.Join(modPath, filepath.ToSlash(matches[3])) + case gopathwalk.RootGOROOT: + importPath = subdir + } + + result := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + nonCanonicalImportPath: importPath, + needsReplace: false, + } + if root.Type == gopathwalk.RootGOROOT { + // stdlib packages are always in scope, despite the confusing go.mod + return result, nil + } + // Check that this package is not obviously impossible to import. + modFile := r.findModFile(dir) + + modBytes, err := ioutil.ReadFile(modFile) + if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) { + // The module's declared path does not match + // its expected path. It probably needs a + // replace directive we don't have. + result.needsReplace = true + } + + return result, nil +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// modulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +// +// Copied from cmd/go/internal/modfile. +func modulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go new file mode 100644 index 00000000..f96b92d0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -0,0 +1,121 @@ +package imports + +import ( + "sync" +) + +// ModuleResolver implements Resolver for modules using the go command as little +// as feasible. +// +// To find packages to import, the resolver needs to know about all of the +// the packages that could be imported. This includes packages that are +// already in modules that are in (1) the current module, (2) replace targets, +// and (3) packages in the module cache. Packages in (1) and (2) may change over +// time, as the client may edit the current module and locally replaced modules. 
+// The module cache (which includes all of the packages in (3)) can only +// ever be added to. +// +// The resolver can thus save state about packages in the module cache +// and guarantee that this will not change over time. To obtain information +// about new modules added to the module cache, the module cache should be +// rescanned. +// +// It is OK to serve information about modules that have been deleted, +// as they do still exist. +// TODO(suzmue): can we share information with the caller about +// what module needs to be downloaded to import this package? + +type directoryPackageStatus int + +const ( + _ directoryPackageStatus = iota + directoryScanned +) + +type directoryPackageInfo struct { + // status indicates the extent to which this struct has been filled in. + status directoryPackageStatus + // err is non-nil when there was an error trying to reach status. + err error + + // Set when status > directoryScanned. + + // dir is the absolute directory of this package. + dir string + // nonCanonicalImportPath is the expected import path for this package. + // This may not be an import path that can be used to import this package. + nonCanonicalImportPath string + // needsReplace is true if the nonCanonicalImportPath does not match the + // the modules declared path, making it impossible to import without a + // replace directive. + needsReplace bool +} + +// reachedStatus returns true when info has a status at least target and any error associated with +// an attempt to reach target. +func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) { + if info.err == nil { + return info.status >= target, nil + } + if info.status == target { + return true, info.err + } + return true, nil +} + +// moduleCacheInfo is a concurrency safe map for storing information about +// the directories in the module cache. +// +// The information in this cache is built incrementally. Entries are initialized in scan. +// No new keys should be added in any other functions, as all directories containing +// packages are identified in scan. +// +// Other functions, including loadExports and findPackage, may update entries in this cache +// as they discover new things about the directory. +// +// We do not need to protect the data in the cache for multiple writes, because it only stores +// module cache directories, which do not change. If two competing stores take place, there will be +// one store that wins. Although this could result in a loss of information it will not be incorrect +// and may just result in recomputing the same result later. +// +// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) +type moduleCacheInfo struct { + mu sync.Mutex + // modCacheDirInfo stores information about packages in + // module cache directories. Keyed by absolute directory. + modCacheDirInfo map[string]*directoryPackageInfo +} + +// Store stores the package info for dir. +func (d *moduleCacheInfo) Store(dir string, info directoryPackageInfo) { + d.mu.Lock() + defer d.mu.Unlock() + d.modCacheDirInfo[dir] = &directoryPackageInfo{ + status: info.status, + err: info.err, + dir: info.dir, + nonCanonicalImportPath: info.nonCanonicalImportPath, + needsReplace: info.needsReplace, + } +} + +// Load returns a copy of the directoryPackageInfo for absolute directory dir. 
+func (d *moduleCacheInfo) Load(dir string) (directoryPackageInfo, bool) { + d.mu.Lock() + defer d.mu.Unlock() + info, ok := d.modCacheDirInfo[dir] + if !ok { + return directoryPackageInfo{}, false + } + return *info, true +} + +// Keys returns the keys currently present in d. +func (d *moduleCacheInfo) Keys() (keys []string) { + d.mu.Lock() + defer d.mu.Unlock() + for key := range d.modCacheDirInfo { + keys = append(keys, key) + } + return keys +} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go new file mode 100644 index 00000000..22627947 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -0,0 +1,280 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hacked up copy of go/ast/import.go + +package imports + +import ( + "go/ast" + "go/token" + "sort" + "strconv" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. +// It also removes duplicate imports when it is possible to do so without data loss. +func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { + for i, d := range f.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + break + } + + if len(d.Specs) == 0 { + // Empty import block, remove it. + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + } + + if !d.Lparen.IsValid() { + // Not a block: sorted by default. + continue + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { + // j begins a new run. End this one. + specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...) + i = j + } + } + specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...) + d.Specs = specs + + // Deduping can leave a blank line before the rparen; clean that up. + if len(d.Specs) > 0 { + lastSpec := d.Specs[len(d.Specs)-1] + lastLine := fset.Position(lastSpec.Pos()).Line + if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 { + fset.File(d.Rparen).MergeLine(rParenLine - 1) + } + } + } +} + +// mergeImports merges all the import declarations into the first one. +// Taken from golang.org/x/tools/ast/astutil. +func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { + if len(f.Decls) <= 1 { + return + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } +} + +// declImports reports whether gen contains an import of path. +// Taken from golang.org/x/tools/ast/astutil. 
+func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +func importPath(s ast.Spec) string { + t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s ast.Spec) string { + n := s.(*ast.ImportSpec).Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s ast.Spec) string { + c := s.(*ast.ImportSpec).Comment + if c == nil { + return "" + } + return c.Text() +} + +// collapse indicates whether prev may be removed, leaving only next. +func collapse(prev, next ast.Spec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + return prev.(*ast.ImportSpec).Comment == nil +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Identify comments in this range. + // Any comment from pos[0].Start to the final line counts. + lastLine := fset.Position(pos[len(pos)-1].End).Line + cstart := len(f.Comments) + cend := len(f.Comments) + for i, g := range f.Comments { + if g.Pos() < pos[0].Start { + continue + } + if i < cstart { + cstart = i + } + if fset.Position(g.End()).Line > lastLine { + cend = i + break + } + } + comments := f.Comments[cstart:cend] + + // Assign each comment to the import spec preceding it. + importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} + specIndex := 0 + for _, g := range comments { + for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { + specIndex++ + } + s := specs[specIndex].(*ast.ImportSpec) + importComment[s] = append(importComment[s], g) + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec{env, specs}) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. + deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } else { + p := s.Pos() + fset.File(p).MergeLine(fset.Position(p).Line) + } + } + specs = deduped + + // Fix up comment positions + for i, s := range specs { + s := s.(*ast.ImportSpec) + if s.Name != nil { + s.Name.NamePos = pos[i].Start + } + s.Path.ValuePos = pos[i].Start + s.EndPos = pos[i].End + nextSpecPos := pos[i].End + + for _, g := range importComment[s] { + for _, c := range g.List { + c.Slash = pos[i].End + nextSpecPos = c.End() + } + } + if i < len(specs)-1 { + pos[i+1].Start = nextSpecPos + pos[i+1].End = nextSpecPos + } + } + + sort.Sort(byCommentPos(comments)) + + // Fixup comments can insert blank lines, because import specs are on different lines. + // We remove those blank lines here by merging import spec to the first import spec line. 
+ firstSpecLine := fset.Position(specs[0].Pos()).Line + for _, s := range specs[1:] { + p := s.Pos() + line := fset.File(p).Line(p) + for previousLine := line - 1; previousLine >= firstSpecLine; { + fset.File(p).MergeLine(previousLine) + previousLine-- + } + } + return specs +} + +type byImportSpec struct { + env *ProcessEnv + specs []ast.Spec // slice of *ast.ImportSpec +} + +func (x byImportSpec) Len() int { return len(x.specs) } +func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x.specs[i]) + jpath := importPath(x.specs[j]) + + igroup := importGroup(x.env, ipath) + jgroup := importGroup(x.env, jpath) + if igroup != jgroup { + return igroup < jgroup + } + + if ipath != jpath { + return ipath < jpath + } + iname := importName(x.specs[i]) + jname := importName(x.specs[j]) + + if iname != jname { + return iname < jname + } + return importComment(x.specs[i]) < importComment(x.specs[j]) +} + +type byCommentPos []*ast.CommentGroup + +func (x byCommentPos) Len() int { return len(x) } +func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go new file mode 100644 index 00000000..544339e5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -0,0 +1,10377 @@ +// Code generated by mkstdlib.go. DO NOT EDIT. + +package imports + +var stdlib = map[string]map[string]bool{ + "archive/tar": map[string]bool{ + "ErrFieldTooLong": true, + "ErrHeader": true, + "ErrWriteAfterClose": true, + "ErrWriteTooLong": true, + "FileInfoHeader": true, + "Format": true, + "FormatGNU": true, + "FormatPAX": true, + "FormatUSTAR": true, + "FormatUnknown": true, + "Header": true, + "NewReader": true, + "NewWriter": true, + "Reader": true, + "TypeBlock": true, + "TypeChar": true, + "TypeCont": true, + "TypeDir": true, + "TypeFifo": true, + "TypeGNULongLink": true, + "TypeGNULongName": true, + "TypeGNUSparse": true, + "TypeLink": true, + "TypeReg": true, + "TypeRegA": true, + "TypeSymlink": true, + "TypeXGlobalHeader": true, + "TypeXHeader": true, + "Writer": true, + }, + "archive/zip": map[string]bool{ + "Compressor": true, + "Decompressor": true, + "Deflate": true, + "ErrAlgorithm": true, + "ErrChecksum": true, + "ErrFormat": true, + "File": true, + "FileHeader": true, + "FileInfoHeader": true, + "NewReader": true, + "NewWriter": true, + "OpenReader": true, + "ReadCloser": true, + "Reader": true, + "RegisterCompressor": true, + "RegisterDecompressor": true, + "Store": true, + "Writer": true, + }, + "bufio": map[string]bool{ + "ErrAdvanceTooFar": true, + "ErrBufferFull": true, + "ErrFinalToken": true, + "ErrInvalidUnreadByte": true, + "ErrInvalidUnreadRune": true, + "ErrNegativeAdvance": true, + "ErrNegativeCount": true, + "ErrTooLong": true, + "MaxScanTokenSize": true, + "NewReadWriter": true, + "NewReader": true, + "NewReaderSize": true, + "NewScanner": true, + "NewWriter": true, + "NewWriterSize": true, + "ReadWriter": true, + "Reader": true, + "ScanBytes": true, + "ScanLines": true, + "ScanRunes": true, + "ScanWords": true, + "Scanner": true, + "SplitFunc": true, + "Writer": true, + }, + "bytes": map[string]bool{ + "Buffer": true, + "Compare": true, + "Contains": true, + "ContainsAny": true, + "ContainsRune": true, + "Count": true, + "Equal": true, + "EqualFold": true, + "ErrTooLarge": 
true, + "Fields": true, + "FieldsFunc": true, + "HasPrefix": true, + "HasSuffix": true, + "Index": true, + "IndexAny": true, + "IndexByte": true, + "IndexFunc": true, + "IndexRune": true, + "Join": true, + "LastIndex": true, + "LastIndexAny": true, + "LastIndexByte": true, + "LastIndexFunc": true, + "Map": true, + "MinRead": true, + "NewBuffer": true, + "NewBufferString": true, + "NewReader": true, + "Reader": true, + "Repeat": true, + "Replace": true, + "ReplaceAll": true, + "Runes": true, + "Split": true, + "SplitAfter": true, + "SplitAfterN": true, + "SplitN": true, + "Title": true, + "ToLower": true, + "ToLowerSpecial": true, + "ToTitle": true, + "ToTitleSpecial": true, + "ToUpper": true, + "ToUpperSpecial": true, + "ToValidUTF8": true, + "Trim": true, + "TrimFunc": true, + "TrimLeft": true, + "TrimLeftFunc": true, + "TrimPrefix": true, + "TrimRight": true, + "TrimRightFunc": true, + "TrimSpace": true, + "TrimSuffix": true, + }, + "compress/bzip2": map[string]bool{ + "NewReader": true, + "StructuralError": true, + }, + "compress/flate": map[string]bool{ + "BestCompression": true, + "BestSpeed": true, + "CorruptInputError": true, + "DefaultCompression": true, + "HuffmanOnly": true, + "InternalError": true, + "NewReader": true, + "NewReaderDict": true, + "NewWriter": true, + "NewWriterDict": true, + "NoCompression": true, + "ReadError": true, + "Reader": true, + "Resetter": true, + "WriteError": true, + "Writer": true, + }, + "compress/gzip": map[string]bool{ + "BestCompression": true, + "BestSpeed": true, + "DefaultCompression": true, + "ErrChecksum": true, + "ErrHeader": true, + "Header": true, + "HuffmanOnly": true, + "NewReader": true, + "NewWriter": true, + "NewWriterLevel": true, + "NoCompression": true, + "Reader": true, + "Writer": true, + }, + "compress/lzw": map[string]bool{ + "LSB": true, + "MSB": true, + "NewReader": true, + "NewWriter": true, + "Order": true, + }, + "compress/zlib": map[string]bool{ + "BestCompression": true, + "BestSpeed": true, + "DefaultCompression": true, + "ErrChecksum": true, + "ErrDictionary": true, + "ErrHeader": true, + "HuffmanOnly": true, + "NewReader": true, + "NewReaderDict": true, + "NewWriter": true, + "NewWriterLevel": true, + "NewWriterLevelDict": true, + "NoCompression": true, + "Resetter": true, + "Writer": true, + }, + "container/heap": map[string]bool{ + "Fix": true, + "Init": true, + "Interface": true, + "Pop": true, + "Push": true, + "Remove": true, + }, + "container/list": map[string]bool{ + "Element": true, + "List": true, + "New": true, + }, + "container/ring": map[string]bool{ + "New": true, + "Ring": true, + }, + "context": map[string]bool{ + "Background": true, + "CancelFunc": true, + "Canceled": true, + "Context": true, + "DeadlineExceeded": true, + "TODO": true, + "WithCancel": true, + "WithDeadline": true, + "WithTimeout": true, + "WithValue": true, + }, + "crypto": map[string]bool{ + "BLAKE2b_256": true, + "BLAKE2b_384": true, + "BLAKE2b_512": true, + "BLAKE2s_256": true, + "Decrypter": true, + "DecrypterOpts": true, + "Hash": true, + "MD4": true, + "MD5": true, + "MD5SHA1": true, + "PrivateKey": true, + "PublicKey": true, + "RIPEMD160": true, + "RegisterHash": true, + "SHA1": true, + "SHA224": true, + "SHA256": true, + "SHA384": true, + "SHA3_224": true, + "SHA3_256": true, + "SHA3_384": true, + "SHA3_512": true, + "SHA512": true, + "SHA512_224": true, + "SHA512_256": true, + "Signer": true, + "SignerOpts": true, + }, + "crypto/aes": map[string]bool{ + "BlockSize": true, + "KeySizeError": true, + "NewCipher": true, + }, + 
"crypto/cipher": map[string]bool{ + "AEAD": true, + "Block": true, + "BlockMode": true, + "NewCBCDecrypter": true, + "NewCBCEncrypter": true, + "NewCFBDecrypter": true, + "NewCFBEncrypter": true, + "NewCTR": true, + "NewGCM": true, + "NewGCMWithNonceSize": true, + "NewGCMWithTagSize": true, + "NewOFB": true, + "Stream": true, + "StreamReader": true, + "StreamWriter": true, + }, + "crypto/des": map[string]bool{ + "BlockSize": true, + "KeySizeError": true, + "NewCipher": true, + "NewTripleDESCipher": true, + }, + "crypto/dsa": map[string]bool{ + "ErrInvalidPublicKey": true, + "GenerateKey": true, + "GenerateParameters": true, + "L1024N160": true, + "L2048N224": true, + "L2048N256": true, + "L3072N256": true, + "ParameterSizes": true, + "Parameters": true, + "PrivateKey": true, + "PublicKey": true, + "Sign": true, + "Verify": true, + }, + "crypto/ecdsa": map[string]bool{ + "GenerateKey": true, + "PrivateKey": true, + "PublicKey": true, + "Sign": true, + "Verify": true, + }, + "crypto/ed25519": map[string]bool{ + "GenerateKey": true, + "NewKeyFromSeed": true, + "PrivateKey": true, + "PrivateKeySize": true, + "PublicKey": true, + "PublicKeySize": true, + "SeedSize": true, + "Sign": true, + "SignatureSize": true, + "Verify": true, + }, + "crypto/elliptic": map[string]bool{ + "Curve": true, + "CurveParams": true, + "GenerateKey": true, + "Marshal": true, + "P224": true, + "P256": true, + "P384": true, + "P521": true, + "Unmarshal": true, + }, + "crypto/hmac": map[string]bool{ + "Equal": true, + "New": true, + }, + "crypto/md5": map[string]bool{ + "BlockSize": true, + "New": true, + "Size": true, + "Sum": true, + }, + "crypto/rand": map[string]bool{ + "Int": true, + "Prime": true, + "Read": true, + "Reader": true, + }, + "crypto/rc4": map[string]bool{ + "Cipher": true, + "KeySizeError": true, + "NewCipher": true, + }, + "crypto/rsa": map[string]bool{ + "CRTValue": true, + "DecryptOAEP": true, + "DecryptPKCS1v15": true, + "DecryptPKCS1v15SessionKey": true, + "EncryptOAEP": true, + "EncryptPKCS1v15": true, + "ErrDecryption": true, + "ErrMessageTooLong": true, + "ErrVerification": true, + "GenerateKey": true, + "GenerateMultiPrimeKey": true, + "OAEPOptions": true, + "PKCS1v15DecryptOptions": true, + "PSSOptions": true, + "PSSSaltLengthAuto": true, + "PSSSaltLengthEqualsHash": true, + "PrecomputedValues": true, + "PrivateKey": true, + "PublicKey": true, + "SignPKCS1v15": true, + "SignPSS": true, + "VerifyPKCS1v15": true, + "VerifyPSS": true, + }, + "crypto/sha1": map[string]bool{ + "BlockSize": true, + "New": true, + "Size": true, + "Sum": true, + }, + "crypto/sha256": map[string]bool{ + "BlockSize": true, + "New": true, + "New224": true, + "Size": true, + "Size224": true, + "Sum224": true, + "Sum256": true, + }, + "crypto/sha512": map[string]bool{ + "BlockSize": true, + "New": true, + "New384": true, + "New512_224": true, + "New512_256": true, + "Size": true, + "Size224": true, + "Size256": true, + "Size384": true, + "Sum384": true, + "Sum512": true, + "Sum512_224": true, + "Sum512_256": true, + }, + "crypto/subtle": map[string]bool{ + "ConstantTimeByteEq": true, + "ConstantTimeCompare": true, + "ConstantTimeCopy": true, + "ConstantTimeEq": true, + "ConstantTimeLessOrEq": true, + "ConstantTimeSelect": true, + }, + "crypto/tls": map[string]bool{ + "Certificate": true, + "CertificateRequestInfo": true, + "Client": true, + "ClientAuthType": true, + "ClientHelloInfo": true, + "ClientSessionCache": true, + "ClientSessionState": true, + "Config": true, + "Conn": true, + "ConnectionState": true, + 
"CurveID": true, + "CurveP256": true, + "CurveP384": true, + "CurveP521": true, + "Dial": true, + "DialWithDialer": true, + "ECDSAWithP256AndSHA256": true, + "ECDSAWithP384AndSHA384": true, + "ECDSAWithP521AndSHA512": true, + "ECDSAWithSHA1": true, + "Ed25519": true, + "Listen": true, + "LoadX509KeyPair": true, + "NewLRUClientSessionCache": true, + "NewListener": true, + "NoClientCert": true, + "PKCS1WithSHA1": true, + "PKCS1WithSHA256": true, + "PKCS1WithSHA384": true, + "PKCS1WithSHA512": true, + "PSSWithSHA256": true, + "PSSWithSHA384": true, + "PSSWithSHA512": true, + "RecordHeaderError": true, + "RenegotiateFreelyAsClient": true, + "RenegotiateNever": true, + "RenegotiateOnceAsClient": true, + "RenegotiationSupport": true, + "RequestClientCert": true, + "RequireAndVerifyClientCert": true, + "RequireAnyClientCert": true, + "Server": true, + "SignatureScheme": true, + "TLS_AES_128_GCM_SHA256": true, + "TLS_AES_256_GCM_SHA384": true, + "TLS_CHACHA20_POLY1305_SHA256": true, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": true, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": true, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": true, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": true, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": true, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": true, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": true, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": true, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": true, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": true, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": true, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": true, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": true, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": true, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": true, + "TLS_FALLBACK_SCSV": true, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": true, + "TLS_RSA_WITH_AES_128_CBC_SHA": true, + "TLS_RSA_WITH_AES_128_CBC_SHA256": true, + "TLS_RSA_WITH_AES_128_GCM_SHA256": true, + "TLS_RSA_WITH_AES_256_CBC_SHA": true, + "TLS_RSA_WITH_AES_256_GCM_SHA384": true, + "TLS_RSA_WITH_RC4_128_SHA": true, + "VerifyClientCertIfGiven": true, + "VersionSSL30": true, + "VersionTLS10": true, + "VersionTLS11": true, + "VersionTLS12": true, + "VersionTLS13": true, + "X25519": true, + "X509KeyPair": true, + }, + "crypto/x509": map[string]bool{ + "CANotAuthorizedForExtKeyUsage": true, + "CANotAuthorizedForThisName": true, + "CertPool": true, + "Certificate": true, + "CertificateInvalidError": true, + "CertificateRequest": true, + "ConstraintViolationError": true, + "CreateCertificate": true, + "CreateCertificateRequest": true, + "DSA": true, + "DSAWithSHA1": true, + "DSAWithSHA256": true, + "DecryptPEMBlock": true, + "ECDSA": true, + "ECDSAWithSHA1": true, + "ECDSAWithSHA256": true, + "ECDSAWithSHA384": true, + "ECDSAWithSHA512": true, + "Ed25519": true, + "EncryptPEMBlock": true, + "ErrUnsupportedAlgorithm": true, + "Expired": true, + "ExtKeyUsage": true, + "ExtKeyUsageAny": true, + "ExtKeyUsageClientAuth": true, + "ExtKeyUsageCodeSigning": true, + "ExtKeyUsageEmailProtection": true, + "ExtKeyUsageIPSECEndSystem": true, + "ExtKeyUsageIPSECTunnel": true, + "ExtKeyUsageIPSECUser": true, + "ExtKeyUsageMicrosoftCommercialCodeSigning": true, + "ExtKeyUsageMicrosoftKernelCodeSigning": true, + "ExtKeyUsageMicrosoftServerGatedCrypto": true, + "ExtKeyUsageNetscapeServerGatedCrypto": true, + "ExtKeyUsageOCSPSigning": true, + "ExtKeyUsageServerAuth": true, + "ExtKeyUsageTimeStamping": true, + "HostnameError": true, + "IncompatibleUsage": true, + "IncorrectPasswordError": true, + "InsecureAlgorithmError": true, + 
"InvalidReason": true, + "IsEncryptedPEMBlock": true, + "KeyUsage": true, + "KeyUsageCRLSign": true, + "KeyUsageCertSign": true, + "KeyUsageContentCommitment": true, + "KeyUsageDataEncipherment": true, + "KeyUsageDecipherOnly": true, + "KeyUsageDigitalSignature": true, + "KeyUsageEncipherOnly": true, + "KeyUsageKeyAgreement": true, + "KeyUsageKeyEncipherment": true, + "MD2WithRSA": true, + "MD5WithRSA": true, + "MarshalECPrivateKey": true, + "MarshalPKCS1PrivateKey": true, + "MarshalPKCS1PublicKey": true, + "MarshalPKCS8PrivateKey": true, + "MarshalPKIXPublicKey": true, + "NameConstraintsWithoutSANs": true, + "NameMismatch": true, + "NewCertPool": true, + "NotAuthorizedToSign": true, + "PEMCipher": true, + "PEMCipher3DES": true, + "PEMCipherAES128": true, + "PEMCipherAES192": true, + "PEMCipherAES256": true, + "PEMCipherDES": true, + "ParseCRL": true, + "ParseCertificate": true, + "ParseCertificateRequest": true, + "ParseCertificates": true, + "ParseDERCRL": true, + "ParseECPrivateKey": true, + "ParsePKCS1PrivateKey": true, + "ParsePKCS1PublicKey": true, + "ParsePKCS8PrivateKey": true, + "ParsePKIXPublicKey": true, + "PublicKeyAlgorithm": true, + "PureEd25519": true, + "RSA": true, + "SHA1WithRSA": true, + "SHA256WithRSA": true, + "SHA256WithRSAPSS": true, + "SHA384WithRSA": true, + "SHA384WithRSAPSS": true, + "SHA512WithRSA": true, + "SHA512WithRSAPSS": true, + "SignatureAlgorithm": true, + "SystemCertPool": true, + "SystemRootsError": true, + "TooManyConstraints": true, + "TooManyIntermediates": true, + "UnconstrainedName": true, + "UnhandledCriticalExtension": true, + "UnknownAuthorityError": true, + "UnknownPublicKeyAlgorithm": true, + "UnknownSignatureAlgorithm": true, + "VerifyOptions": true, + }, + "crypto/x509/pkix": map[string]bool{ + "AlgorithmIdentifier": true, + "AttributeTypeAndValue": true, + "AttributeTypeAndValueSET": true, + "CertificateList": true, + "Extension": true, + "Name": true, + "RDNSequence": true, + "RelativeDistinguishedNameSET": true, + "RevokedCertificate": true, + "TBSCertificateList": true, + }, + "database/sql": map[string]bool{ + "ColumnType": true, + "Conn": true, + "DB": true, + "DBStats": true, + "Drivers": true, + "ErrConnDone": true, + "ErrNoRows": true, + "ErrTxDone": true, + "IsolationLevel": true, + "LevelDefault": true, + "LevelLinearizable": true, + "LevelReadCommitted": true, + "LevelReadUncommitted": true, + "LevelRepeatableRead": true, + "LevelSerializable": true, + "LevelSnapshot": true, + "LevelWriteCommitted": true, + "Named": true, + "NamedArg": true, + "NullBool": true, + "NullFloat64": true, + "NullInt32": true, + "NullInt64": true, + "NullString": true, + "NullTime": true, + "Open": true, + "OpenDB": true, + "Out": true, + "RawBytes": true, + "Register": true, + "Result": true, + "Row": true, + "Rows": true, + "Scanner": true, + "Stmt": true, + "Tx": true, + "TxOptions": true, + }, + "database/sql/driver": map[string]bool{ + "Bool": true, + "ColumnConverter": true, + "Conn": true, + "ConnBeginTx": true, + "ConnPrepareContext": true, + "Connector": true, + "DefaultParameterConverter": true, + "Driver": true, + "DriverContext": true, + "ErrBadConn": true, + "ErrRemoveArgument": true, + "ErrSkip": true, + "Execer": true, + "ExecerContext": true, + "Int32": true, + "IsScanValue": true, + "IsValue": true, + "IsolationLevel": true, + "NamedValue": true, + "NamedValueChecker": true, + "NotNull": true, + "Null": true, + "Pinger": true, + "Queryer": true, + "QueryerContext": true, + "Result": true, + "ResultNoRows": true, + "Rows": true, + 
"RowsAffected": true, + "RowsColumnTypeDatabaseTypeName": true, + "RowsColumnTypeLength": true, + "RowsColumnTypeNullable": true, + "RowsColumnTypePrecisionScale": true, + "RowsColumnTypeScanType": true, + "RowsNextResultSet": true, + "SessionResetter": true, + "Stmt": true, + "StmtExecContext": true, + "StmtQueryContext": true, + "String": true, + "Tx": true, + "TxOptions": true, + "Value": true, + "ValueConverter": true, + "Valuer": true, + }, + "debug/dwarf": map[string]bool{ + "AddrType": true, + "ArrayType": true, + "Attr": true, + "AttrAbstractOrigin": true, + "AttrAccessibility": true, + "AttrAddrClass": true, + "AttrAllocated": true, + "AttrArtificial": true, + "AttrAssociated": true, + "AttrBaseTypes": true, + "AttrBitOffset": true, + "AttrBitSize": true, + "AttrByteSize": true, + "AttrCallColumn": true, + "AttrCallFile": true, + "AttrCallLine": true, + "AttrCalling": true, + "AttrCommonRef": true, + "AttrCompDir": true, + "AttrConstValue": true, + "AttrContainingType": true, + "AttrCount": true, + "AttrDataLocation": true, + "AttrDataMemberLoc": true, + "AttrDeclColumn": true, + "AttrDeclFile": true, + "AttrDeclLine": true, + "AttrDeclaration": true, + "AttrDefaultValue": true, + "AttrDescription": true, + "AttrDiscr": true, + "AttrDiscrList": true, + "AttrDiscrValue": true, + "AttrEncoding": true, + "AttrEntrypc": true, + "AttrExtension": true, + "AttrExternal": true, + "AttrFrameBase": true, + "AttrFriend": true, + "AttrHighpc": true, + "AttrIdentifierCase": true, + "AttrImport": true, + "AttrInline": true, + "AttrIsOptional": true, + "AttrLanguage": true, + "AttrLocation": true, + "AttrLowerBound": true, + "AttrLowpc": true, + "AttrMacroInfo": true, + "AttrName": true, + "AttrNamelistItem": true, + "AttrOrdering": true, + "AttrPriority": true, + "AttrProducer": true, + "AttrPrototyped": true, + "AttrRanges": true, + "AttrReturnAddr": true, + "AttrSegment": true, + "AttrSibling": true, + "AttrSpecification": true, + "AttrStartScope": true, + "AttrStaticLink": true, + "AttrStmtList": true, + "AttrStride": true, + "AttrStrideSize": true, + "AttrStringLength": true, + "AttrTrampoline": true, + "AttrType": true, + "AttrUpperBound": true, + "AttrUseLocation": true, + "AttrUseUTF8": true, + "AttrVarParam": true, + "AttrVirtuality": true, + "AttrVisibility": true, + "AttrVtableElemLoc": true, + "BasicType": true, + "BoolType": true, + "CharType": true, + "Class": true, + "ClassAddress": true, + "ClassBlock": true, + "ClassConstant": true, + "ClassExprLoc": true, + "ClassFlag": true, + "ClassLinePtr": true, + "ClassLocListPtr": true, + "ClassMacPtr": true, + "ClassRangeListPtr": true, + "ClassReference": true, + "ClassReferenceAlt": true, + "ClassReferenceSig": true, + "ClassString": true, + "ClassStringAlt": true, + "ClassUnknown": true, + "CommonType": true, + "ComplexType": true, + "Data": true, + "DecodeError": true, + "DotDotDotType": true, + "Entry": true, + "EnumType": true, + "EnumValue": true, + "ErrUnknownPC": true, + "Field": true, + "FloatType": true, + "FuncType": true, + "IntType": true, + "LineEntry": true, + "LineFile": true, + "LineReader": true, + "LineReaderPos": true, + "New": true, + "Offset": true, + "PtrType": true, + "QualType": true, + "Reader": true, + "StructField": true, + "StructType": true, + "Tag": true, + "TagAccessDeclaration": true, + "TagArrayType": true, + "TagBaseType": true, + "TagCatchDwarfBlock": true, + "TagClassType": true, + "TagCommonDwarfBlock": true, + "TagCommonInclusion": true, + "TagCompileUnit": true, + "TagCondition": true, + 
"TagConstType": true, + "TagConstant": true, + "TagDwarfProcedure": true, + "TagEntryPoint": true, + "TagEnumerationType": true, + "TagEnumerator": true, + "TagFileType": true, + "TagFormalParameter": true, + "TagFriend": true, + "TagImportedDeclaration": true, + "TagImportedModule": true, + "TagImportedUnit": true, + "TagInheritance": true, + "TagInlinedSubroutine": true, + "TagInterfaceType": true, + "TagLabel": true, + "TagLexDwarfBlock": true, + "TagMember": true, + "TagModule": true, + "TagMutableType": true, + "TagNamelist": true, + "TagNamelistItem": true, + "TagNamespace": true, + "TagPackedType": true, + "TagPartialUnit": true, + "TagPointerType": true, + "TagPtrToMemberType": true, + "TagReferenceType": true, + "TagRestrictType": true, + "TagRvalueReferenceType": true, + "TagSetType": true, + "TagSharedType": true, + "TagStringType": true, + "TagStructType": true, + "TagSubprogram": true, + "TagSubrangeType": true, + "TagSubroutineType": true, + "TagTemplateAlias": true, + "TagTemplateTypeParameter": true, + "TagTemplateValueParameter": true, + "TagThrownType": true, + "TagTryDwarfBlock": true, + "TagTypeUnit": true, + "TagTypedef": true, + "TagUnionType": true, + "TagUnspecifiedParameters": true, + "TagUnspecifiedType": true, + "TagVariable": true, + "TagVariant": true, + "TagVariantPart": true, + "TagVolatileType": true, + "TagWithStmt": true, + "Type": true, + "TypedefType": true, + "UcharType": true, + "UintType": true, + "UnspecifiedType": true, + "UnsupportedType": true, + "VoidType": true, + }, + "debug/elf": map[string]bool{ + "ARM_MAGIC_TRAMP_NUMBER": true, + "COMPRESS_HIOS": true, + "COMPRESS_HIPROC": true, + "COMPRESS_LOOS": true, + "COMPRESS_LOPROC": true, + "COMPRESS_ZLIB": true, + "Chdr32": true, + "Chdr64": true, + "Class": true, + "CompressionType": true, + "DF_BIND_NOW": true, + "DF_ORIGIN": true, + "DF_STATIC_TLS": true, + "DF_SYMBOLIC": true, + "DF_TEXTREL": true, + "DT_BIND_NOW": true, + "DT_DEBUG": true, + "DT_ENCODING": true, + "DT_FINI": true, + "DT_FINI_ARRAY": true, + "DT_FINI_ARRAYSZ": true, + "DT_FLAGS": true, + "DT_HASH": true, + "DT_HIOS": true, + "DT_HIPROC": true, + "DT_INIT": true, + "DT_INIT_ARRAY": true, + "DT_INIT_ARRAYSZ": true, + "DT_JMPREL": true, + "DT_LOOS": true, + "DT_LOPROC": true, + "DT_NEEDED": true, + "DT_NULL": true, + "DT_PLTGOT": true, + "DT_PLTREL": true, + "DT_PLTRELSZ": true, + "DT_PREINIT_ARRAY": true, + "DT_PREINIT_ARRAYSZ": true, + "DT_REL": true, + "DT_RELA": true, + "DT_RELAENT": true, + "DT_RELASZ": true, + "DT_RELENT": true, + "DT_RELSZ": true, + "DT_RPATH": true, + "DT_RUNPATH": true, + "DT_SONAME": true, + "DT_STRSZ": true, + "DT_STRTAB": true, + "DT_SYMBOLIC": true, + "DT_SYMENT": true, + "DT_SYMTAB": true, + "DT_TEXTREL": true, + "DT_VERNEED": true, + "DT_VERNEEDNUM": true, + "DT_VERSYM": true, + "Data": true, + "Dyn32": true, + "Dyn64": true, + "DynFlag": true, + "DynTag": true, + "EI_ABIVERSION": true, + "EI_CLASS": true, + "EI_DATA": true, + "EI_NIDENT": true, + "EI_OSABI": true, + "EI_PAD": true, + "EI_VERSION": true, + "ELFCLASS32": true, + "ELFCLASS64": true, + "ELFCLASSNONE": true, + "ELFDATA2LSB": true, + "ELFDATA2MSB": true, + "ELFDATANONE": true, + "ELFMAG": true, + "ELFOSABI_86OPEN": true, + "ELFOSABI_AIX": true, + "ELFOSABI_ARM": true, + "ELFOSABI_AROS": true, + "ELFOSABI_CLOUDABI": true, + "ELFOSABI_FENIXOS": true, + "ELFOSABI_FREEBSD": true, + "ELFOSABI_HPUX": true, + "ELFOSABI_HURD": true, + "ELFOSABI_IRIX": true, + "ELFOSABI_LINUX": true, + "ELFOSABI_MODESTO": true, + "ELFOSABI_NETBSD": true, + 
"ELFOSABI_NONE": true, + "ELFOSABI_NSK": true, + "ELFOSABI_OPENBSD": true, + "ELFOSABI_OPENVMS": true, + "ELFOSABI_SOLARIS": true, + "ELFOSABI_STANDALONE": true, + "ELFOSABI_TRU64": true, + "EM_386": true, + "EM_486": true, + "EM_56800EX": true, + "EM_68HC05": true, + "EM_68HC08": true, + "EM_68HC11": true, + "EM_68HC12": true, + "EM_68HC16": true, + "EM_68K": true, + "EM_78KOR": true, + "EM_8051": true, + "EM_860": true, + "EM_88K": true, + "EM_960": true, + "EM_AARCH64": true, + "EM_ALPHA": true, + "EM_ALPHA_STD": true, + "EM_ALTERA_NIOS2": true, + "EM_AMDGPU": true, + "EM_ARC": true, + "EM_ARCA": true, + "EM_ARC_COMPACT": true, + "EM_ARC_COMPACT2": true, + "EM_ARM": true, + "EM_AVR": true, + "EM_AVR32": true, + "EM_BA1": true, + "EM_BA2": true, + "EM_BLACKFIN": true, + "EM_BPF": true, + "EM_C166": true, + "EM_CDP": true, + "EM_CE": true, + "EM_CLOUDSHIELD": true, + "EM_COGE": true, + "EM_COLDFIRE": true, + "EM_COOL": true, + "EM_COREA_1ST": true, + "EM_COREA_2ND": true, + "EM_CR": true, + "EM_CR16": true, + "EM_CRAYNV2": true, + "EM_CRIS": true, + "EM_CRX": true, + "EM_CSR_KALIMBA": true, + "EM_CUDA": true, + "EM_CYPRESS_M8C": true, + "EM_D10V": true, + "EM_D30V": true, + "EM_DSP24": true, + "EM_DSPIC30F": true, + "EM_DXP": true, + "EM_ECOG1": true, + "EM_ECOG16": true, + "EM_ECOG1X": true, + "EM_ECOG2": true, + "EM_ETPU": true, + "EM_EXCESS": true, + "EM_F2MC16": true, + "EM_FIREPATH": true, + "EM_FR20": true, + "EM_FR30": true, + "EM_FT32": true, + "EM_FX66": true, + "EM_H8S": true, + "EM_H8_300": true, + "EM_H8_300H": true, + "EM_H8_500": true, + "EM_HUANY": true, + "EM_IA_64": true, + "EM_INTEL205": true, + "EM_INTEL206": true, + "EM_INTEL207": true, + "EM_INTEL208": true, + "EM_INTEL209": true, + "EM_IP2K": true, + "EM_JAVELIN": true, + "EM_K10M": true, + "EM_KM32": true, + "EM_KMX16": true, + "EM_KMX32": true, + "EM_KMX8": true, + "EM_KVARC": true, + "EM_L10M": true, + "EM_LANAI": true, + "EM_LATTICEMICO32": true, + "EM_M16C": true, + "EM_M32": true, + "EM_M32C": true, + "EM_M32R": true, + "EM_MANIK": true, + "EM_MAX": true, + "EM_MAXQ30": true, + "EM_MCHP_PIC": true, + "EM_MCST_ELBRUS": true, + "EM_ME16": true, + "EM_METAG": true, + "EM_MICROBLAZE": true, + "EM_MIPS": true, + "EM_MIPS_RS3_LE": true, + "EM_MIPS_RS4_BE": true, + "EM_MIPS_X": true, + "EM_MMA": true, + "EM_MMDSP_PLUS": true, + "EM_MMIX": true, + "EM_MN10200": true, + "EM_MN10300": true, + "EM_MOXIE": true, + "EM_MSP430": true, + "EM_NCPU": true, + "EM_NDR1": true, + "EM_NDS32": true, + "EM_NONE": true, + "EM_NORC": true, + "EM_NS32K": true, + "EM_OPEN8": true, + "EM_OPENRISC": true, + "EM_PARISC": true, + "EM_PCP": true, + "EM_PDP10": true, + "EM_PDP11": true, + "EM_PDSP": true, + "EM_PJ": true, + "EM_PPC": true, + "EM_PPC64": true, + "EM_PRISM": true, + "EM_QDSP6": true, + "EM_R32C": true, + "EM_RCE": true, + "EM_RH32": true, + "EM_RISCV": true, + "EM_RL78": true, + "EM_RS08": true, + "EM_RX": true, + "EM_S370": true, + "EM_S390": true, + "EM_SCORE7": true, + "EM_SEP": true, + "EM_SE_C17": true, + "EM_SE_C33": true, + "EM_SH": true, + "EM_SHARC": true, + "EM_SLE9X": true, + "EM_SNP1K": true, + "EM_SPARC": true, + "EM_SPARC32PLUS": true, + "EM_SPARCV9": true, + "EM_ST100": true, + "EM_ST19": true, + "EM_ST200": true, + "EM_ST7": true, + "EM_ST9PLUS": true, + "EM_STARCORE": true, + "EM_STM8": true, + "EM_STXP7X": true, + "EM_SVX": true, + "EM_TILE64": true, + "EM_TILEGX": true, + "EM_TILEPRO": true, + "EM_TINYJ": true, + "EM_TI_ARP32": true, + "EM_TI_C2000": true, + "EM_TI_C5500": true, + "EM_TI_C6000": true, + 
"EM_TI_PRU": true, + "EM_TMM_GPP": true, + "EM_TPC": true, + "EM_TRICORE": true, + "EM_TRIMEDIA": true, + "EM_TSK3000": true, + "EM_UNICORE": true, + "EM_V800": true, + "EM_V850": true, + "EM_VAX": true, + "EM_VIDEOCORE": true, + "EM_VIDEOCORE3": true, + "EM_VIDEOCORE5": true, + "EM_VISIUM": true, + "EM_VPP500": true, + "EM_X86_64": true, + "EM_XCORE": true, + "EM_XGATE": true, + "EM_XIMO16": true, + "EM_XTENSA": true, + "EM_Z80": true, + "EM_ZSP": true, + "ET_CORE": true, + "ET_DYN": true, + "ET_EXEC": true, + "ET_HIOS": true, + "ET_HIPROC": true, + "ET_LOOS": true, + "ET_LOPROC": true, + "ET_NONE": true, + "ET_REL": true, + "EV_CURRENT": true, + "EV_NONE": true, + "ErrNoSymbols": true, + "File": true, + "FileHeader": true, + "FormatError": true, + "Header32": true, + "Header64": true, + "ImportedSymbol": true, + "Machine": true, + "NT_FPREGSET": true, + "NT_PRPSINFO": true, + "NT_PRSTATUS": true, + "NType": true, + "NewFile": true, + "OSABI": true, + "Open": true, + "PF_MASKOS": true, + "PF_MASKPROC": true, + "PF_R": true, + "PF_W": true, + "PF_X": true, + "PT_DYNAMIC": true, + "PT_HIOS": true, + "PT_HIPROC": true, + "PT_INTERP": true, + "PT_LOAD": true, + "PT_LOOS": true, + "PT_LOPROC": true, + "PT_NOTE": true, + "PT_NULL": true, + "PT_PHDR": true, + "PT_SHLIB": true, + "PT_TLS": true, + "Prog": true, + "Prog32": true, + "Prog64": true, + "ProgFlag": true, + "ProgHeader": true, + "ProgType": true, + "R_386": true, + "R_386_16": true, + "R_386_32": true, + "R_386_32PLT": true, + "R_386_8": true, + "R_386_COPY": true, + "R_386_GLOB_DAT": true, + "R_386_GOT32": true, + "R_386_GOT32X": true, + "R_386_GOTOFF": true, + "R_386_GOTPC": true, + "R_386_IRELATIVE": true, + "R_386_JMP_SLOT": true, + "R_386_NONE": true, + "R_386_PC16": true, + "R_386_PC32": true, + "R_386_PC8": true, + "R_386_PLT32": true, + "R_386_RELATIVE": true, + "R_386_SIZE32": true, + "R_386_TLS_DESC": true, + "R_386_TLS_DESC_CALL": true, + "R_386_TLS_DTPMOD32": true, + "R_386_TLS_DTPOFF32": true, + "R_386_TLS_GD": true, + "R_386_TLS_GD_32": true, + "R_386_TLS_GD_CALL": true, + "R_386_TLS_GD_POP": true, + "R_386_TLS_GD_PUSH": true, + "R_386_TLS_GOTDESC": true, + "R_386_TLS_GOTIE": true, + "R_386_TLS_IE": true, + "R_386_TLS_IE_32": true, + "R_386_TLS_LDM": true, + "R_386_TLS_LDM_32": true, + "R_386_TLS_LDM_CALL": true, + "R_386_TLS_LDM_POP": true, + "R_386_TLS_LDM_PUSH": true, + "R_386_TLS_LDO_32": true, + "R_386_TLS_LE": true, + "R_386_TLS_LE_32": true, + "R_386_TLS_TPOFF": true, + "R_386_TLS_TPOFF32": true, + "R_390": true, + "R_390_12": true, + "R_390_16": true, + "R_390_20": true, + "R_390_32": true, + "R_390_64": true, + "R_390_8": true, + "R_390_COPY": true, + "R_390_GLOB_DAT": true, + "R_390_GOT12": true, + "R_390_GOT16": true, + "R_390_GOT20": true, + "R_390_GOT32": true, + "R_390_GOT64": true, + "R_390_GOTENT": true, + "R_390_GOTOFF": true, + "R_390_GOTOFF16": true, + "R_390_GOTOFF64": true, + "R_390_GOTPC": true, + "R_390_GOTPCDBL": true, + "R_390_GOTPLT12": true, + "R_390_GOTPLT16": true, + "R_390_GOTPLT20": true, + "R_390_GOTPLT32": true, + "R_390_GOTPLT64": true, + "R_390_GOTPLTENT": true, + "R_390_GOTPLTOFF16": true, + "R_390_GOTPLTOFF32": true, + "R_390_GOTPLTOFF64": true, + "R_390_JMP_SLOT": true, + "R_390_NONE": true, + "R_390_PC16": true, + "R_390_PC16DBL": true, + "R_390_PC32": true, + "R_390_PC32DBL": true, + "R_390_PC64": true, + "R_390_PLT16DBL": true, + "R_390_PLT32": true, + "R_390_PLT32DBL": true, + "R_390_PLT64": true, + "R_390_RELATIVE": true, + "R_390_TLS_DTPMOD": true, + "R_390_TLS_DTPOFF": true, + 
"R_390_TLS_GD32": true, + "R_390_TLS_GD64": true, + "R_390_TLS_GDCALL": true, + "R_390_TLS_GOTIE12": true, + "R_390_TLS_GOTIE20": true, + "R_390_TLS_GOTIE32": true, + "R_390_TLS_GOTIE64": true, + "R_390_TLS_IE32": true, + "R_390_TLS_IE64": true, + "R_390_TLS_IEENT": true, + "R_390_TLS_LDCALL": true, + "R_390_TLS_LDM32": true, + "R_390_TLS_LDM64": true, + "R_390_TLS_LDO32": true, + "R_390_TLS_LDO64": true, + "R_390_TLS_LE32": true, + "R_390_TLS_LE64": true, + "R_390_TLS_LOAD": true, + "R_390_TLS_TPOFF": true, + "R_AARCH64": true, + "R_AARCH64_ABS16": true, + "R_AARCH64_ABS32": true, + "R_AARCH64_ABS64": true, + "R_AARCH64_ADD_ABS_LO12_NC": true, + "R_AARCH64_ADR_GOT_PAGE": true, + "R_AARCH64_ADR_PREL_LO21": true, + "R_AARCH64_ADR_PREL_PG_HI21": true, + "R_AARCH64_ADR_PREL_PG_HI21_NC": true, + "R_AARCH64_CALL26": true, + "R_AARCH64_CONDBR19": true, + "R_AARCH64_COPY": true, + "R_AARCH64_GLOB_DAT": true, + "R_AARCH64_GOT_LD_PREL19": true, + "R_AARCH64_IRELATIVE": true, + "R_AARCH64_JUMP26": true, + "R_AARCH64_JUMP_SLOT": true, + "R_AARCH64_LD64_GOTOFF_LO15": true, + "R_AARCH64_LD64_GOTPAGE_LO15": true, + "R_AARCH64_LD64_GOT_LO12_NC": true, + "R_AARCH64_LDST128_ABS_LO12_NC": true, + "R_AARCH64_LDST16_ABS_LO12_NC": true, + "R_AARCH64_LDST32_ABS_LO12_NC": true, + "R_AARCH64_LDST64_ABS_LO12_NC": true, + "R_AARCH64_LDST8_ABS_LO12_NC": true, + "R_AARCH64_LD_PREL_LO19": true, + "R_AARCH64_MOVW_SABS_G0": true, + "R_AARCH64_MOVW_SABS_G1": true, + "R_AARCH64_MOVW_SABS_G2": true, + "R_AARCH64_MOVW_UABS_G0": true, + "R_AARCH64_MOVW_UABS_G0_NC": true, + "R_AARCH64_MOVW_UABS_G1": true, + "R_AARCH64_MOVW_UABS_G1_NC": true, + "R_AARCH64_MOVW_UABS_G2": true, + "R_AARCH64_MOVW_UABS_G2_NC": true, + "R_AARCH64_MOVW_UABS_G3": true, + "R_AARCH64_NONE": true, + "R_AARCH64_NULL": true, + "R_AARCH64_P32_ABS16": true, + "R_AARCH64_P32_ABS32": true, + "R_AARCH64_P32_ADD_ABS_LO12_NC": true, + "R_AARCH64_P32_ADR_GOT_PAGE": true, + "R_AARCH64_P32_ADR_PREL_LO21": true, + "R_AARCH64_P32_ADR_PREL_PG_HI21": true, + "R_AARCH64_P32_CALL26": true, + "R_AARCH64_P32_CONDBR19": true, + "R_AARCH64_P32_COPY": true, + "R_AARCH64_P32_GLOB_DAT": true, + "R_AARCH64_P32_GOT_LD_PREL19": true, + "R_AARCH64_P32_IRELATIVE": true, + "R_AARCH64_P32_JUMP26": true, + "R_AARCH64_P32_JUMP_SLOT": true, + "R_AARCH64_P32_LD32_GOT_LO12_NC": true, + "R_AARCH64_P32_LDST128_ABS_LO12_NC": true, + "R_AARCH64_P32_LDST16_ABS_LO12_NC": true, + "R_AARCH64_P32_LDST32_ABS_LO12_NC": true, + "R_AARCH64_P32_LDST64_ABS_LO12_NC": true, + "R_AARCH64_P32_LDST8_ABS_LO12_NC": true, + "R_AARCH64_P32_LD_PREL_LO19": true, + "R_AARCH64_P32_MOVW_SABS_G0": true, + "R_AARCH64_P32_MOVW_UABS_G0": true, + "R_AARCH64_P32_MOVW_UABS_G0_NC": true, + "R_AARCH64_P32_MOVW_UABS_G1": true, + "R_AARCH64_P32_PREL16": true, + "R_AARCH64_P32_PREL32": true, + "R_AARCH64_P32_RELATIVE": true, + "R_AARCH64_P32_TLSDESC": true, + "R_AARCH64_P32_TLSDESC_ADD_LO12_NC": true, + "R_AARCH64_P32_TLSDESC_ADR_PAGE21": true, + "R_AARCH64_P32_TLSDESC_ADR_PREL21": true, + "R_AARCH64_P32_TLSDESC_CALL": true, + "R_AARCH64_P32_TLSDESC_LD32_LO12_NC": true, + "R_AARCH64_P32_TLSDESC_LD_PREL19": true, + "R_AARCH64_P32_TLSGD_ADD_LO12_NC": true, + "R_AARCH64_P32_TLSGD_ADR_PAGE21": true, + "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21": true, + "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC": true, + "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19": true, + "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12": true, + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12": true, + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC": true, + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0": true, 
+ "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC": true, + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1": true, + "R_AARCH64_P32_TLS_DTPMOD": true, + "R_AARCH64_P32_TLS_DTPREL": true, + "R_AARCH64_P32_TLS_TPREL": true, + "R_AARCH64_P32_TSTBR14": true, + "R_AARCH64_PREL16": true, + "R_AARCH64_PREL32": true, + "R_AARCH64_PREL64": true, + "R_AARCH64_RELATIVE": true, + "R_AARCH64_TLSDESC": true, + "R_AARCH64_TLSDESC_ADD": true, + "R_AARCH64_TLSDESC_ADD_LO12_NC": true, + "R_AARCH64_TLSDESC_ADR_PAGE21": true, + "R_AARCH64_TLSDESC_ADR_PREL21": true, + "R_AARCH64_TLSDESC_CALL": true, + "R_AARCH64_TLSDESC_LD64_LO12_NC": true, + "R_AARCH64_TLSDESC_LDR": true, + "R_AARCH64_TLSDESC_LD_PREL19": true, + "R_AARCH64_TLSDESC_OFF_G0_NC": true, + "R_AARCH64_TLSDESC_OFF_G1": true, + "R_AARCH64_TLSGD_ADD_LO12_NC": true, + "R_AARCH64_TLSGD_ADR_PAGE21": true, + "R_AARCH64_TLSGD_ADR_PREL21": true, + "R_AARCH64_TLSGD_MOVW_G0_NC": true, + "R_AARCH64_TLSGD_MOVW_G1": true, + "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21": true, + "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC": true, + "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19": true, + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC": true, + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1": true, + "R_AARCH64_TLSLD_ADR_PAGE21": true, + "R_AARCH64_TLSLD_ADR_PREL21": true, + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12": true, + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC": true, + "R_AARCH64_TLSLE_ADD_TPREL_HI12": true, + "R_AARCH64_TLSLE_ADD_TPREL_LO12": true, + "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC": true, + "R_AARCH64_TLSLE_LDST128_TPREL_LO12": true, + "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC": true, + "R_AARCH64_TLSLE_MOVW_TPREL_G0": true, + "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC": true, + "R_AARCH64_TLSLE_MOVW_TPREL_G1": true, + "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC": true, + "R_AARCH64_TLSLE_MOVW_TPREL_G2": true, + "R_AARCH64_TLS_DTPMOD64": true, + "R_AARCH64_TLS_DTPREL64": true, + "R_AARCH64_TLS_TPREL64": true, + "R_AARCH64_TSTBR14": true, + "R_ALPHA": true, + "R_ALPHA_BRADDR": true, + "R_ALPHA_COPY": true, + "R_ALPHA_GLOB_DAT": true, + "R_ALPHA_GPDISP": true, + "R_ALPHA_GPREL32": true, + "R_ALPHA_GPRELHIGH": true, + "R_ALPHA_GPRELLOW": true, + "R_ALPHA_GPVALUE": true, + "R_ALPHA_HINT": true, + "R_ALPHA_IMMED_BR_HI32": true, + "R_ALPHA_IMMED_GP_16": true, + "R_ALPHA_IMMED_GP_HI32": true, + "R_ALPHA_IMMED_LO32": true, + "R_ALPHA_IMMED_SCN_HI32": true, + "R_ALPHA_JMP_SLOT": true, + "R_ALPHA_LITERAL": true, + "R_ALPHA_LITUSE": true, + "R_ALPHA_NONE": true, + "R_ALPHA_OP_PRSHIFT": true, + "R_ALPHA_OP_PSUB": true, + "R_ALPHA_OP_PUSH": true, + "R_ALPHA_OP_STORE": true, + "R_ALPHA_REFLONG": true, + "R_ALPHA_REFQUAD": true, + "R_ALPHA_RELATIVE": true, + "R_ALPHA_SREL16": true, + "R_ALPHA_SREL32": true, + "R_ALPHA_SREL64": true, + "R_ARM": true, + "R_ARM_ABS12": true, + "R_ARM_ABS16": true, + "R_ARM_ABS32": true, + "R_ARM_ABS32_NOI": true, + "R_ARM_ABS8": true, + "R_ARM_ALU_PCREL_15_8": true, + "R_ARM_ALU_PCREL_23_15": true, + "R_ARM_ALU_PCREL_7_0": true, + "R_ARM_ALU_PC_G0": true, + "R_ARM_ALU_PC_G0_NC": true, + "R_ARM_ALU_PC_G1": true, + "R_ARM_ALU_PC_G1_NC": true, + "R_ARM_ALU_PC_G2": true, + "R_ARM_ALU_SBREL_19_12_NC": true, + "R_ARM_ALU_SBREL_27_20_CK": true, + "R_ARM_ALU_SB_G0": true, + "R_ARM_ALU_SB_G0_NC": true, + "R_ARM_ALU_SB_G1": true, + "R_ARM_ALU_SB_G1_NC": true, + "R_ARM_ALU_SB_G2": true, + "R_ARM_AMP_VCALL9": true, + "R_ARM_BASE_ABS": true, + "R_ARM_CALL": true, + "R_ARM_COPY": true, + "R_ARM_GLOB_DAT": true, + "R_ARM_GNU_VTENTRY": true, + "R_ARM_GNU_VTINHERIT": true, + "R_ARM_GOT32": true, + "R_ARM_GOTOFF": true, + "R_ARM_GOTOFF12": true, + 
"R_ARM_GOTPC": true, + "R_ARM_GOTRELAX": true, + "R_ARM_GOT_ABS": true, + "R_ARM_GOT_BREL12": true, + "R_ARM_GOT_PREL": true, + "R_ARM_IRELATIVE": true, + "R_ARM_JUMP24": true, + "R_ARM_JUMP_SLOT": true, + "R_ARM_LDC_PC_G0": true, + "R_ARM_LDC_PC_G1": true, + "R_ARM_LDC_PC_G2": true, + "R_ARM_LDC_SB_G0": true, + "R_ARM_LDC_SB_G1": true, + "R_ARM_LDC_SB_G2": true, + "R_ARM_LDRS_PC_G0": true, + "R_ARM_LDRS_PC_G1": true, + "R_ARM_LDRS_PC_G2": true, + "R_ARM_LDRS_SB_G0": true, + "R_ARM_LDRS_SB_G1": true, + "R_ARM_LDRS_SB_G2": true, + "R_ARM_LDR_PC_G1": true, + "R_ARM_LDR_PC_G2": true, + "R_ARM_LDR_SBREL_11_10_NC": true, + "R_ARM_LDR_SB_G0": true, + "R_ARM_LDR_SB_G1": true, + "R_ARM_LDR_SB_G2": true, + "R_ARM_ME_TOO": true, + "R_ARM_MOVT_ABS": true, + "R_ARM_MOVT_BREL": true, + "R_ARM_MOVT_PREL": true, + "R_ARM_MOVW_ABS_NC": true, + "R_ARM_MOVW_BREL": true, + "R_ARM_MOVW_BREL_NC": true, + "R_ARM_MOVW_PREL_NC": true, + "R_ARM_NONE": true, + "R_ARM_PC13": true, + "R_ARM_PC24": true, + "R_ARM_PLT32": true, + "R_ARM_PLT32_ABS": true, + "R_ARM_PREL31": true, + "R_ARM_PRIVATE_0": true, + "R_ARM_PRIVATE_1": true, + "R_ARM_PRIVATE_10": true, + "R_ARM_PRIVATE_11": true, + "R_ARM_PRIVATE_12": true, + "R_ARM_PRIVATE_13": true, + "R_ARM_PRIVATE_14": true, + "R_ARM_PRIVATE_15": true, + "R_ARM_PRIVATE_2": true, + "R_ARM_PRIVATE_3": true, + "R_ARM_PRIVATE_4": true, + "R_ARM_PRIVATE_5": true, + "R_ARM_PRIVATE_6": true, + "R_ARM_PRIVATE_7": true, + "R_ARM_PRIVATE_8": true, + "R_ARM_PRIVATE_9": true, + "R_ARM_RABS32": true, + "R_ARM_RBASE": true, + "R_ARM_REL32": true, + "R_ARM_REL32_NOI": true, + "R_ARM_RELATIVE": true, + "R_ARM_RPC24": true, + "R_ARM_RREL32": true, + "R_ARM_RSBREL32": true, + "R_ARM_RXPC25": true, + "R_ARM_SBREL31": true, + "R_ARM_SBREL32": true, + "R_ARM_SWI24": true, + "R_ARM_TARGET1": true, + "R_ARM_TARGET2": true, + "R_ARM_THM_ABS5": true, + "R_ARM_THM_ALU_ABS_G0_NC": true, + "R_ARM_THM_ALU_ABS_G1_NC": true, + "R_ARM_THM_ALU_ABS_G2_NC": true, + "R_ARM_THM_ALU_ABS_G3": true, + "R_ARM_THM_ALU_PREL_11_0": true, + "R_ARM_THM_GOT_BREL12": true, + "R_ARM_THM_JUMP11": true, + "R_ARM_THM_JUMP19": true, + "R_ARM_THM_JUMP24": true, + "R_ARM_THM_JUMP6": true, + "R_ARM_THM_JUMP8": true, + "R_ARM_THM_MOVT_ABS": true, + "R_ARM_THM_MOVT_BREL": true, + "R_ARM_THM_MOVT_PREL": true, + "R_ARM_THM_MOVW_ABS_NC": true, + "R_ARM_THM_MOVW_BREL": true, + "R_ARM_THM_MOVW_BREL_NC": true, + "R_ARM_THM_MOVW_PREL_NC": true, + "R_ARM_THM_PC12": true, + "R_ARM_THM_PC22": true, + "R_ARM_THM_PC8": true, + "R_ARM_THM_RPC22": true, + "R_ARM_THM_SWI8": true, + "R_ARM_THM_TLS_CALL": true, + "R_ARM_THM_TLS_DESCSEQ16": true, + "R_ARM_THM_TLS_DESCSEQ32": true, + "R_ARM_THM_XPC22": true, + "R_ARM_TLS_CALL": true, + "R_ARM_TLS_DESCSEQ": true, + "R_ARM_TLS_DTPMOD32": true, + "R_ARM_TLS_DTPOFF32": true, + "R_ARM_TLS_GD32": true, + "R_ARM_TLS_GOTDESC": true, + "R_ARM_TLS_IE12GP": true, + "R_ARM_TLS_IE32": true, + "R_ARM_TLS_LDM32": true, + "R_ARM_TLS_LDO12": true, + "R_ARM_TLS_LDO32": true, + "R_ARM_TLS_LE12": true, + "R_ARM_TLS_LE32": true, + "R_ARM_TLS_TPOFF32": true, + "R_ARM_V4BX": true, + "R_ARM_XPC25": true, + "R_INFO": true, + "R_INFO32": true, + "R_MIPS": true, + "R_MIPS_16": true, + "R_MIPS_26": true, + "R_MIPS_32": true, + "R_MIPS_64": true, + "R_MIPS_ADD_IMMEDIATE": true, + "R_MIPS_CALL16": true, + "R_MIPS_CALL_HI16": true, + "R_MIPS_CALL_LO16": true, + "R_MIPS_DELETE": true, + "R_MIPS_GOT16": true, + "R_MIPS_GOT_DISP": true, + "R_MIPS_GOT_HI16": true, + "R_MIPS_GOT_LO16": true, + "R_MIPS_GOT_OFST": true, + 
"R_MIPS_GOT_PAGE": true, + "R_MIPS_GPREL16": true, + "R_MIPS_GPREL32": true, + "R_MIPS_HI16": true, + "R_MIPS_HIGHER": true, + "R_MIPS_HIGHEST": true, + "R_MIPS_INSERT_A": true, + "R_MIPS_INSERT_B": true, + "R_MIPS_JALR": true, + "R_MIPS_LITERAL": true, + "R_MIPS_LO16": true, + "R_MIPS_NONE": true, + "R_MIPS_PC16": true, + "R_MIPS_PJUMP": true, + "R_MIPS_REL16": true, + "R_MIPS_REL32": true, + "R_MIPS_RELGOT": true, + "R_MIPS_SCN_DISP": true, + "R_MIPS_SHIFT5": true, + "R_MIPS_SHIFT6": true, + "R_MIPS_SUB": true, + "R_MIPS_TLS_DTPMOD32": true, + "R_MIPS_TLS_DTPMOD64": true, + "R_MIPS_TLS_DTPREL32": true, + "R_MIPS_TLS_DTPREL64": true, + "R_MIPS_TLS_DTPREL_HI16": true, + "R_MIPS_TLS_DTPREL_LO16": true, + "R_MIPS_TLS_GD": true, + "R_MIPS_TLS_GOTTPREL": true, + "R_MIPS_TLS_LDM": true, + "R_MIPS_TLS_TPREL32": true, + "R_MIPS_TLS_TPREL64": true, + "R_MIPS_TLS_TPREL_HI16": true, + "R_MIPS_TLS_TPREL_LO16": true, + "R_PPC": true, + "R_PPC64": true, + "R_PPC64_ADDR14": true, + "R_PPC64_ADDR14_BRNTAKEN": true, + "R_PPC64_ADDR14_BRTAKEN": true, + "R_PPC64_ADDR16": true, + "R_PPC64_ADDR16_DS": true, + "R_PPC64_ADDR16_HA": true, + "R_PPC64_ADDR16_HI": true, + "R_PPC64_ADDR16_HIGH": true, + "R_PPC64_ADDR16_HIGHA": true, + "R_PPC64_ADDR16_HIGHER": true, + "R_PPC64_ADDR16_HIGHERA": true, + "R_PPC64_ADDR16_HIGHEST": true, + "R_PPC64_ADDR16_HIGHESTA": true, + "R_PPC64_ADDR16_LO": true, + "R_PPC64_ADDR16_LO_DS": true, + "R_PPC64_ADDR24": true, + "R_PPC64_ADDR32": true, + "R_PPC64_ADDR64": true, + "R_PPC64_ADDR64_LOCAL": true, + "R_PPC64_DTPMOD64": true, + "R_PPC64_DTPREL16": true, + "R_PPC64_DTPREL16_DS": true, + "R_PPC64_DTPREL16_HA": true, + "R_PPC64_DTPREL16_HI": true, + "R_PPC64_DTPREL16_HIGH": true, + "R_PPC64_DTPREL16_HIGHA": true, + "R_PPC64_DTPREL16_HIGHER": true, + "R_PPC64_DTPREL16_HIGHERA": true, + "R_PPC64_DTPREL16_HIGHEST": true, + "R_PPC64_DTPREL16_HIGHESTA": true, + "R_PPC64_DTPREL16_LO": true, + "R_PPC64_DTPREL16_LO_DS": true, + "R_PPC64_DTPREL64": true, + "R_PPC64_ENTRY": true, + "R_PPC64_GOT16": true, + "R_PPC64_GOT16_DS": true, + "R_PPC64_GOT16_HA": true, + "R_PPC64_GOT16_HI": true, + "R_PPC64_GOT16_LO": true, + "R_PPC64_GOT16_LO_DS": true, + "R_PPC64_GOT_DTPREL16_DS": true, + "R_PPC64_GOT_DTPREL16_HA": true, + "R_PPC64_GOT_DTPREL16_HI": true, + "R_PPC64_GOT_DTPREL16_LO_DS": true, + "R_PPC64_GOT_TLSGD16": true, + "R_PPC64_GOT_TLSGD16_HA": true, + "R_PPC64_GOT_TLSGD16_HI": true, + "R_PPC64_GOT_TLSGD16_LO": true, + "R_PPC64_GOT_TLSLD16": true, + "R_PPC64_GOT_TLSLD16_HA": true, + "R_PPC64_GOT_TLSLD16_HI": true, + "R_PPC64_GOT_TLSLD16_LO": true, + "R_PPC64_GOT_TPREL16_DS": true, + "R_PPC64_GOT_TPREL16_HA": true, + "R_PPC64_GOT_TPREL16_HI": true, + "R_PPC64_GOT_TPREL16_LO_DS": true, + "R_PPC64_IRELATIVE": true, + "R_PPC64_JMP_IREL": true, + "R_PPC64_JMP_SLOT": true, + "R_PPC64_NONE": true, + "R_PPC64_PLT16_LO_DS": true, + "R_PPC64_PLTGOT16": true, + "R_PPC64_PLTGOT16_DS": true, + "R_PPC64_PLTGOT16_HA": true, + "R_PPC64_PLTGOT16_HI": true, + "R_PPC64_PLTGOT16_LO": true, + "R_PPC64_PLTGOT_LO_DS": true, + "R_PPC64_REL14": true, + "R_PPC64_REL14_BRNTAKEN": true, + "R_PPC64_REL14_BRTAKEN": true, + "R_PPC64_REL16": true, + "R_PPC64_REL16DX_HA": true, + "R_PPC64_REL16_HA": true, + "R_PPC64_REL16_HI": true, + "R_PPC64_REL16_LO": true, + "R_PPC64_REL24": true, + "R_PPC64_REL24_NOTOC": true, + "R_PPC64_REL32": true, + "R_PPC64_REL64": true, + "R_PPC64_SECTOFF_DS": true, + "R_PPC64_SECTOFF_LO_DS": true, + "R_PPC64_TLS": true, + "R_PPC64_TLSGD": true, + "R_PPC64_TLSLD": true, + "R_PPC64_TOC": true, + 
"R_PPC64_TOC16": true, + "R_PPC64_TOC16_DS": true, + "R_PPC64_TOC16_HA": true, + "R_PPC64_TOC16_HI": true, + "R_PPC64_TOC16_LO": true, + "R_PPC64_TOC16_LO_DS": true, + "R_PPC64_TOCSAVE": true, + "R_PPC64_TPREL16": true, + "R_PPC64_TPREL16_DS": true, + "R_PPC64_TPREL16_HA": true, + "R_PPC64_TPREL16_HI": true, + "R_PPC64_TPREL16_HIGH": true, + "R_PPC64_TPREL16_HIGHA": true, + "R_PPC64_TPREL16_HIGHER": true, + "R_PPC64_TPREL16_HIGHERA": true, + "R_PPC64_TPREL16_HIGHEST": true, + "R_PPC64_TPREL16_HIGHESTA": true, + "R_PPC64_TPREL16_LO": true, + "R_PPC64_TPREL16_LO_DS": true, + "R_PPC64_TPREL64": true, + "R_PPC_ADDR14": true, + "R_PPC_ADDR14_BRNTAKEN": true, + "R_PPC_ADDR14_BRTAKEN": true, + "R_PPC_ADDR16": true, + "R_PPC_ADDR16_HA": true, + "R_PPC_ADDR16_HI": true, + "R_PPC_ADDR16_LO": true, + "R_PPC_ADDR24": true, + "R_PPC_ADDR32": true, + "R_PPC_COPY": true, + "R_PPC_DTPMOD32": true, + "R_PPC_DTPREL16": true, + "R_PPC_DTPREL16_HA": true, + "R_PPC_DTPREL16_HI": true, + "R_PPC_DTPREL16_LO": true, + "R_PPC_DTPREL32": true, + "R_PPC_EMB_BIT_FLD": true, + "R_PPC_EMB_MRKREF": true, + "R_PPC_EMB_NADDR16": true, + "R_PPC_EMB_NADDR16_HA": true, + "R_PPC_EMB_NADDR16_HI": true, + "R_PPC_EMB_NADDR16_LO": true, + "R_PPC_EMB_NADDR32": true, + "R_PPC_EMB_RELSDA": true, + "R_PPC_EMB_RELSEC16": true, + "R_PPC_EMB_RELST_HA": true, + "R_PPC_EMB_RELST_HI": true, + "R_PPC_EMB_RELST_LO": true, + "R_PPC_EMB_SDA21": true, + "R_PPC_EMB_SDA2I16": true, + "R_PPC_EMB_SDA2REL": true, + "R_PPC_EMB_SDAI16": true, + "R_PPC_GLOB_DAT": true, + "R_PPC_GOT16": true, + "R_PPC_GOT16_HA": true, + "R_PPC_GOT16_HI": true, + "R_PPC_GOT16_LO": true, + "R_PPC_GOT_TLSGD16": true, + "R_PPC_GOT_TLSGD16_HA": true, + "R_PPC_GOT_TLSGD16_HI": true, + "R_PPC_GOT_TLSGD16_LO": true, + "R_PPC_GOT_TLSLD16": true, + "R_PPC_GOT_TLSLD16_HA": true, + "R_PPC_GOT_TLSLD16_HI": true, + "R_PPC_GOT_TLSLD16_LO": true, + "R_PPC_GOT_TPREL16": true, + "R_PPC_GOT_TPREL16_HA": true, + "R_PPC_GOT_TPREL16_HI": true, + "R_PPC_GOT_TPREL16_LO": true, + "R_PPC_JMP_SLOT": true, + "R_PPC_LOCAL24PC": true, + "R_PPC_NONE": true, + "R_PPC_PLT16_HA": true, + "R_PPC_PLT16_HI": true, + "R_PPC_PLT16_LO": true, + "R_PPC_PLT32": true, + "R_PPC_PLTREL24": true, + "R_PPC_PLTREL32": true, + "R_PPC_REL14": true, + "R_PPC_REL14_BRNTAKEN": true, + "R_PPC_REL14_BRTAKEN": true, + "R_PPC_REL24": true, + "R_PPC_REL32": true, + "R_PPC_RELATIVE": true, + "R_PPC_SDAREL16": true, + "R_PPC_SECTOFF": true, + "R_PPC_SECTOFF_HA": true, + "R_PPC_SECTOFF_HI": true, + "R_PPC_SECTOFF_LO": true, + "R_PPC_TLS": true, + "R_PPC_TPREL16": true, + "R_PPC_TPREL16_HA": true, + "R_PPC_TPREL16_HI": true, + "R_PPC_TPREL16_LO": true, + "R_PPC_TPREL32": true, + "R_PPC_UADDR16": true, + "R_PPC_UADDR32": true, + "R_RISCV": true, + "R_RISCV_32": true, + "R_RISCV_32_PCREL": true, + "R_RISCV_64": true, + "R_RISCV_ADD16": true, + "R_RISCV_ADD32": true, + "R_RISCV_ADD64": true, + "R_RISCV_ADD8": true, + "R_RISCV_ALIGN": true, + "R_RISCV_BRANCH": true, + "R_RISCV_CALL": true, + "R_RISCV_CALL_PLT": true, + "R_RISCV_COPY": true, + "R_RISCV_GNU_VTENTRY": true, + "R_RISCV_GNU_VTINHERIT": true, + "R_RISCV_GOT_HI20": true, + "R_RISCV_GPREL_I": true, + "R_RISCV_GPREL_S": true, + "R_RISCV_HI20": true, + "R_RISCV_JAL": true, + "R_RISCV_JUMP_SLOT": true, + "R_RISCV_LO12_I": true, + "R_RISCV_LO12_S": true, + "R_RISCV_NONE": true, + "R_RISCV_PCREL_HI20": true, + "R_RISCV_PCREL_LO12_I": true, + "R_RISCV_PCREL_LO12_S": true, + "R_RISCV_RELATIVE": true, + "R_RISCV_RELAX": true, + "R_RISCV_RVC_BRANCH": true, + "R_RISCV_RVC_JUMP": true, 
+ "R_RISCV_RVC_LUI": true, + "R_RISCV_SET16": true, + "R_RISCV_SET32": true, + "R_RISCV_SET6": true, + "R_RISCV_SET8": true, + "R_RISCV_SUB16": true, + "R_RISCV_SUB32": true, + "R_RISCV_SUB6": true, + "R_RISCV_SUB64": true, + "R_RISCV_SUB8": true, + "R_RISCV_TLS_DTPMOD32": true, + "R_RISCV_TLS_DTPMOD64": true, + "R_RISCV_TLS_DTPREL32": true, + "R_RISCV_TLS_DTPREL64": true, + "R_RISCV_TLS_GD_HI20": true, + "R_RISCV_TLS_GOT_HI20": true, + "R_RISCV_TLS_TPREL32": true, + "R_RISCV_TLS_TPREL64": true, + "R_RISCV_TPREL_ADD": true, + "R_RISCV_TPREL_HI20": true, + "R_RISCV_TPREL_I": true, + "R_RISCV_TPREL_LO12_I": true, + "R_RISCV_TPREL_LO12_S": true, + "R_RISCV_TPREL_S": true, + "R_SPARC": true, + "R_SPARC_10": true, + "R_SPARC_11": true, + "R_SPARC_13": true, + "R_SPARC_16": true, + "R_SPARC_22": true, + "R_SPARC_32": true, + "R_SPARC_5": true, + "R_SPARC_6": true, + "R_SPARC_64": true, + "R_SPARC_7": true, + "R_SPARC_8": true, + "R_SPARC_COPY": true, + "R_SPARC_DISP16": true, + "R_SPARC_DISP32": true, + "R_SPARC_DISP64": true, + "R_SPARC_DISP8": true, + "R_SPARC_GLOB_DAT": true, + "R_SPARC_GLOB_JMP": true, + "R_SPARC_GOT10": true, + "R_SPARC_GOT13": true, + "R_SPARC_GOT22": true, + "R_SPARC_H44": true, + "R_SPARC_HH22": true, + "R_SPARC_HI22": true, + "R_SPARC_HIPLT22": true, + "R_SPARC_HIX22": true, + "R_SPARC_HM10": true, + "R_SPARC_JMP_SLOT": true, + "R_SPARC_L44": true, + "R_SPARC_LM22": true, + "R_SPARC_LO10": true, + "R_SPARC_LOPLT10": true, + "R_SPARC_LOX10": true, + "R_SPARC_M44": true, + "R_SPARC_NONE": true, + "R_SPARC_OLO10": true, + "R_SPARC_PC10": true, + "R_SPARC_PC22": true, + "R_SPARC_PCPLT10": true, + "R_SPARC_PCPLT22": true, + "R_SPARC_PCPLT32": true, + "R_SPARC_PC_HH22": true, + "R_SPARC_PC_HM10": true, + "R_SPARC_PC_LM22": true, + "R_SPARC_PLT32": true, + "R_SPARC_PLT64": true, + "R_SPARC_REGISTER": true, + "R_SPARC_RELATIVE": true, + "R_SPARC_UA16": true, + "R_SPARC_UA32": true, + "R_SPARC_UA64": true, + "R_SPARC_WDISP16": true, + "R_SPARC_WDISP19": true, + "R_SPARC_WDISP22": true, + "R_SPARC_WDISP30": true, + "R_SPARC_WPLT30": true, + "R_SYM32": true, + "R_SYM64": true, + "R_TYPE32": true, + "R_TYPE64": true, + "R_X86_64": true, + "R_X86_64_16": true, + "R_X86_64_32": true, + "R_X86_64_32S": true, + "R_X86_64_64": true, + "R_X86_64_8": true, + "R_X86_64_COPY": true, + "R_X86_64_DTPMOD64": true, + "R_X86_64_DTPOFF32": true, + "R_X86_64_DTPOFF64": true, + "R_X86_64_GLOB_DAT": true, + "R_X86_64_GOT32": true, + "R_X86_64_GOT64": true, + "R_X86_64_GOTOFF64": true, + "R_X86_64_GOTPC32": true, + "R_X86_64_GOTPC32_TLSDESC": true, + "R_X86_64_GOTPC64": true, + "R_X86_64_GOTPCREL": true, + "R_X86_64_GOTPCREL64": true, + "R_X86_64_GOTPCRELX": true, + "R_X86_64_GOTPLT64": true, + "R_X86_64_GOTTPOFF": true, + "R_X86_64_IRELATIVE": true, + "R_X86_64_JMP_SLOT": true, + "R_X86_64_NONE": true, + "R_X86_64_PC16": true, + "R_X86_64_PC32": true, + "R_X86_64_PC32_BND": true, + "R_X86_64_PC64": true, + "R_X86_64_PC8": true, + "R_X86_64_PLT32": true, + "R_X86_64_PLT32_BND": true, + "R_X86_64_PLTOFF64": true, + "R_X86_64_RELATIVE": true, + "R_X86_64_RELATIVE64": true, + "R_X86_64_REX_GOTPCRELX": true, + "R_X86_64_SIZE32": true, + "R_X86_64_SIZE64": true, + "R_X86_64_TLSDESC": true, + "R_X86_64_TLSDESC_CALL": true, + "R_X86_64_TLSGD": true, + "R_X86_64_TLSLD": true, + "R_X86_64_TPOFF32": true, + "R_X86_64_TPOFF64": true, + "Rel32": true, + "Rel64": true, + "Rela32": true, + "Rela64": true, + "SHF_ALLOC": true, + "SHF_COMPRESSED": true, + "SHF_EXECINSTR": true, + "SHF_GROUP": true, + 
"SHF_INFO_LINK": true, + "SHF_LINK_ORDER": true, + "SHF_MASKOS": true, + "SHF_MASKPROC": true, + "SHF_MERGE": true, + "SHF_OS_NONCONFORMING": true, + "SHF_STRINGS": true, + "SHF_TLS": true, + "SHF_WRITE": true, + "SHN_ABS": true, + "SHN_COMMON": true, + "SHN_HIOS": true, + "SHN_HIPROC": true, + "SHN_HIRESERVE": true, + "SHN_LOOS": true, + "SHN_LOPROC": true, + "SHN_LORESERVE": true, + "SHN_UNDEF": true, + "SHN_XINDEX": true, + "SHT_DYNAMIC": true, + "SHT_DYNSYM": true, + "SHT_FINI_ARRAY": true, + "SHT_GNU_ATTRIBUTES": true, + "SHT_GNU_HASH": true, + "SHT_GNU_LIBLIST": true, + "SHT_GNU_VERDEF": true, + "SHT_GNU_VERNEED": true, + "SHT_GNU_VERSYM": true, + "SHT_GROUP": true, + "SHT_HASH": true, + "SHT_HIOS": true, + "SHT_HIPROC": true, + "SHT_HIUSER": true, + "SHT_INIT_ARRAY": true, + "SHT_LOOS": true, + "SHT_LOPROC": true, + "SHT_LOUSER": true, + "SHT_NOBITS": true, + "SHT_NOTE": true, + "SHT_NULL": true, + "SHT_PREINIT_ARRAY": true, + "SHT_PROGBITS": true, + "SHT_REL": true, + "SHT_RELA": true, + "SHT_SHLIB": true, + "SHT_STRTAB": true, + "SHT_SYMTAB": true, + "SHT_SYMTAB_SHNDX": true, + "STB_GLOBAL": true, + "STB_HIOS": true, + "STB_HIPROC": true, + "STB_LOCAL": true, + "STB_LOOS": true, + "STB_LOPROC": true, + "STB_WEAK": true, + "STT_COMMON": true, + "STT_FILE": true, + "STT_FUNC": true, + "STT_HIOS": true, + "STT_HIPROC": true, + "STT_LOOS": true, + "STT_LOPROC": true, + "STT_NOTYPE": true, + "STT_OBJECT": true, + "STT_SECTION": true, + "STT_TLS": true, + "STV_DEFAULT": true, + "STV_HIDDEN": true, + "STV_INTERNAL": true, + "STV_PROTECTED": true, + "ST_BIND": true, + "ST_INFO": true, + "ST_TYPE": true, + "ST_VISIBILITY": true, + "Section": true, + "Section32": true, + "Section64": true, + "SectionFlag": true, + "SectionHeader": true, + "SectionIndex": true, + "SectionType": true, + "Sym32": true, + "Sym32Size": true, + "Sym64": true, + "Sym64Size": true, + "SymBind": true, + "SymType": true, + "SymVis": true, + "Symbol": true, + "Type": true, + "Version": true, + }, + "debug/gosym": map[string]bool{ + "DecodingError": true, + "Func": true, + "LineTable": true, + "NewLineTable": true, + "NewTable": true, + "Obj": true, + "Sym": true, + "Table": true, + "UnknownFileError": true, + "UnknownLineError": true, + }, + "debug/macho": map[string]bool{ + "ARM64_RELOC_ADDEND": true, + "ARM64_RELOC_BRANCH26": true, + "ARM64_RELOC_GOT_LOAD_PAGE21": true, + "ARM64_RELOC_GOT_LOAD_PAGEOFF12": true, + "ARM64_RELOC_PAGE21": true, + "ARM64_RELOC_PAGEOFF12": true, + "ARM64_RELOC_POINTER_TO_GOT": true, + "ARM64_RELOC_SUBTRACTOR": true, + "ARM64_RELOC_TLVP_LOAD_PAGE21": true, + "ARM64_RELOC_TLVP_LOAD_PAGEOFF12": true, + "ARM64_RELOC_UNSIGNED": true, + "ARM_RELOC_BR24": true, + "ARM_RELOC_HALF": true, + "ARM_RELOC_HALF_SECTDIFF": true, + "ARM_RELOC_LOCAL_SECTDIFF": true, + "ARM_RELOC_PAIR": true, + "ARM_RELOC_PB_LA_PTR": true, + "ARM_RELOC_SECTDIFF": true, + "ARM_RELOC_VANILLA": true, + "ARM_THUMB_32BIT_BRANCH": true, + "ARM_THUMB_RELOC_BR22": true, + "Cpu": true, + "Cpu386": true, + "CpuAmd64": true, + "CpuArm": true, + "CpuArm64": true, + "CpuPpc": true, + "CpuPpc64": true, + "Dylib": true, + "DylibCmd": true, + "Dysymtab": true, + "DysymtabCmd": true, + "ErrNotFat": true, + "FatArch": true, + "FatArchHeader": true, + "FatFile": true, + "File": true, + "FileHeader": true, + "FlagAllModsBound": true, + "FlagAllowStackExecution": true, + "FlagAppExtensionSafe": true, + "FlagBindAtLoad": true, + "FlagBindsToWeak": true, + "FlagCanonical": true, + "FlagDeadStrippableDylib": true, + "FlagDyldLink": true, + 
"FlagForceFlat": true, + "FlagHasTLVDescriptors": true, + "FlagIncrLink": true, + "FlagLazyInit": true, + "FlagNoFixPrebinding": true, + "FlagNoHeapExecution": true, + "FlagNoMultiDefs": true, + "FlagNoReexportedDylibs": true, + "FlagNoUndefs": true, + "FlagPIE": true, + "FlagPrebindable": true, + "FlagPrebound": true, + "FlagRootSafe": true, + "FlagSetuidSafe": true, + "FlagSplitSegs": true, + "FlagSubsectionsViaSymbols": true, + "FlagTwoLevel": true, + "FlagWeakDefines": true, + "FormatError": true, + "GENERIC_RELOC_LOCAL_SECTDIFF": true, + "GENERIC_RELOC_PAIR": true, + "GENERIC_RELOC_PB_LA_PTR": true, + "GENERIC_RELOC_SECTDIFF": true, + "GENERIC_RELOC_TLV": true, + "GENERIC_RELOC_VANILLA": true, + "Load": true, + "LoadBytes": true, + "LoadCmd": true, + "LoadCmdDylib": true, + "LoadCmdDylinker": true, + "LoadCmdDysymtab": true, + "LoadCmdRpath": true, + "LoadCmdSegment": true, + "LoadCmdSegment64": true, + "LoadCmdSymtab": true, + "LoadCmdThread": true, + "LoadCmdUnixThread": true, + "Magic32": true, + "Magic64": true, + "MagicFat": true, + "NewFatFile": true, + "NewFile": true, + "Nlist32": true, + "Nlist64": true, + "Open": true, + "OpenFat": true, + "Regs386": true, + "RegsAMD64": true, + "Reloc": true, + "RelocTypeARM": true, + "RelocTypeARM64": true, + "RelocTypeGeneric": true, + "RelocTypeX86_64": true, + "Rpath": true, + "RpathCmd": true, + "Section": true, + "Section32": true, + "Section64": true, + "SectionHeader": true, + "Segment": true, + "Segment32": true, + "Segment64": true, + "SegmentHeader": true, + "Symbol": true, + "Symtab": true, + "SymtabCmd": true, + "Thread": true, + "Type": true, + "TypeBundle": true, + "TypeDylib": true, + "TypeExec": true, + "TypeObj": true, + "X86_64_RELOC_BRANCH": true, + "X86_64_RELOC_GOT": true, + "X86_64_RELOC_GOT_LOAD": true, + "X86_64_RELOC_SIGNED": true, + "X86_64_RELOC_SIGNED_1": true, + "X86_64_RELOC_SIGNED_2": true, + "X86_64_RELOC_SIGNED_4": true, + "X86_64_RELOC_SUBTRACTOR": true, + "X86_64_RELOC_TLV": true, + "X86_64_RELOC_UNSIGNED": true, + }, + "debug/pe": map[string]bool{ + "COFFSymbol": true, + "COFFSymbolSize": true, + "DataDirectory": true, + "File": true, + "FileHeader": true, + "FormatError": true, + "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE": true, + "IMAGE_DIRECTORY_ENTRY_BASERELOC": true, + "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT": true, + "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR": true, + "IMAGE_DIRECTORY_ENTRY_DEBUG": true, + "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT": true, + "IMAGE_DIRECTORY_ENTRY_EXCEPTION": true, + "IMAGE_DIRECTORY_ENTRY_EXPORT": true, + "IMAGE_DIRECTORY_ENTRY_GLOBALPTR": true, + "IMAGE_DIRECTORY_ENTRY_IAT": true, + "IMAGE_DIRECTORY_ENTRY_IMPORT": true, + "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG": true, + "IMAGE_DIRECTORY_ENTRY_RESOURCE": true, + "IMAGE_DIRECTORY_ENTRY_SECURITY": true, + "IMAGE_DIRECTORY_ENTRY_TLS": true, + "IMAGE_FILE_MACHINE_AM33": true, + "IMAGE_FILE_MACHINE_AMD64": true, + "IMAGE_FILE_MACHINE_ARM": true, + "IMAGE_FILE_MACHINE_ARM64": true, + "IMAGE_FILE_MACHINE_ARMNT": true, + "IMAGE_FILE_MACHINE_EBC": true, + "IMAGE_FILE_MACHINE_I386": true, + "IMAGE_FILE_MACHINE_IA64": true, + "IMAGE_FILE_MACHINE_M32R": true, + "IMAGE_FILE_MACHINE_MIPS16": true, + "IMAGE_FILE_MACHINE_MIPSFPU": true, + "IMAGE_FILE_MACHINE_MIPSFPU16": true, + "IMAGE_FILE_MACHINE_POWERPC": true, + "IMAGE_FILE_MACHINE_POWERPCFP": true, + "IMAGE_FILE_MACHINE_R4000": true, + "IMAGE_FILE_MACHINE_SH3": true, + "IMAGE_FILE_MACHINE_SH3DSP": true, + "IMAGE_FILE_MACHINE_SH4": true, + "IMAGE_FILE_MACHINE_SH5": true, + "IMAGE_FILE_MACHINE_THUMB": 
true, + "IMAGE_FILE_MACHINE_UNKNOWN": true, + "IMAGE_FILE_MACHINE_WCEMIPSV2": true, + "ImportDirectory": true, + "NewFile": true, + "Open": true, + "OptionalHeader32": true, + "OptionalHeader64": true, + "Reloc": true, + "Section": true, + "SectionHeader": true, + "SectionHeader32": true, + "StringTable": true, + "Symbol": true, + }, + "debug/plan9obj": map[string]bool{ + "File": true, + "FileHeader": true, + "Magic386": true, + "Magic64": true, + "MagicAMD64": true, + "MagicARM": true, + "NewFile": true, + "Open": true, + "Section": true, + "SectionHeader": true, + "Sym": true, + }, + "encoding": map[string]bool{ + "BinaryMarshaler": true, + "BinaryUnmarshaler": true, + "TextMarshaler": true, + "TextUnmarshaler": true, + }, + "encoding/ascii85": map[string]bool{ + "CorruptInputError": true, + "Decode": true, + "Encode": true, + "MaxEncodedLen": true, + "NewDecoder": true, + "NewEncoder": true, + }, + "encoding/asn1": map[string]bool{ + "BitString": true, + "ClassApplication": true, + "ClassContextSpecific": true, + "ClassPrivate": true, + "ClassUniversal": true, + "Enumerated": true, + "Flag": true, + "Marshal": true, + "MarshalWithParams": true, + "NullBytes": true, + "NullRawValue": true, + "ObjectIdentifier": true, + "RawContent": true, + "RawValue": true, + "StructuralError": true, + "SyntaxError": true, + "TagBitString": true, + "TagBoolean": true, + "TagEnum": true, + "TagGeneralString": true, + "TagGeneralizedTime": true, + "TagIA5String": true, + "TagInteger": true, + "TagNull": true, + "TagNumericString": true, + "TagOID": true, + "TagOctetString": true, + "TagPrintableString": true, + "TagSequence": true, + "TagSet": true, + "TagT61String": true, + "TagUTCTime": true, + "TagUTF8String": true, + "Unmarshal": true, + "UnmarshalWithParams": true, + }, + "encoding/base32": map[string]bool{ + "CorruptInputError": true, + "Encoding": true, + "HexEncoding": true, + "NewDecoder": true, + "NewEncoder": true, + "NewEncoding": true, + "NoPadding": true, + "StdEncoding": true, + "StdPadding": true, + }, + "encoding/base64": map[string]bool{ + "CorruptInputError": true, + "Encoding": true, + "NewDecoder": true, + "NewEncoder": true, + "NewEncoding": true, + "NoPadding": true, + "RawStdEncoding": true, + "RawURLEncoding": true, + "StdEncoding": true, + "StdPadding": true, + "URLEncoding": true, + }, + "encoding/binary": map[string]bool{ + "BigEndian": true, + "ByteOrder": true, + "LittleEndian": true, + "MaxVarintLen16": true, + "MaxVarintLen32": true, + "MaxVarintLen64": true, + "PutUvarint": true, + "PutVarint": true, + "Read": true, + "ReadUvarint": true, + "ReadVarint": true, + "Size": true, + "Uvarint": true, + "Varint": true, + "Write": true, + }, + "encoding/csv": map[string]bool{ + "ErrBareQuote": true, + "ErrFieldCount": true, + "ErrQuote": true, + "ErrTrailingComma": true, + "NewReader": true, + "NewWriter": true, + "ParseError": true, + "Reader": true, + "Writer": true, + }, + "encoding/gob": map[string]bool{ + "CommonType": true, + "Decoder": true, + "Encoder": true, + "GobDecoder": true, + "GobEncoder": true, + "NewDecoder": true, + "NewEncoder": true, + "Register": true, + "RegisterName": true, + }, + "encoding/hex": map[string]bool{ + "Decode": true, + "DecodeString": true, + "DecodedLen": true, + "Dump": true, + "Dumper": true, + "Encode": true, + "EncodeToString": true, + "EncodedLen": true, + "ErrLength": true, + "InvalidByteError": true, + "NewDecoder": true, + "NewEncoder": true, + }, + "encoding/json": map[string]bool{ + "Compact": true, + "Decoder": true, + "Delim": 
true, + "Encoder": true, + "HTMLEscape": true, + "Indent": true, + "InvalidUTF8Error": true, + "InvalidUnmarshalError": true, + "Marshal": true, + "MarshalIndent": true, + "Marshaler": true, + "MarshalerError": true, + "NewDecoder": true, + "NewEncoder": true, + "Number": true, + "RawMessage": true, + "SyntaxError": true, + "Token": true, + "Unmarshal": true, + "UnmarshalFieldError": true, + "UnmarshalTypeError": true, + "Unmarshaler": true, + "UnsupportedTypeError": true, + "UnsupportedValueError": true, + "Valid": true, + }, + "encoding/pem": map[string]bool{ + "Block": true, + "Decode": true, + "Encode": true, + "EncodeToMemory": true, + }, + "encoding/xml": map[string]bool{ + "Attr": true, + "CharData": true, + "Comment": true, + "CopyToken": true, + "Decoder": true, + "Directive": true, + "Encoder": true, + "EndElement": true, + "Escape": true, + "EscapeText": true, + "HTMLAutoClose": true, + "HTMLEntity": true, + "Header": true, + "Marshal": true, + "MarshalIndent": true, + "Marshaler": true, + "MarshalerAttr": true, + "Name": true, + "NewDecoder": true, + "NewEncoder": true, + "NewTokenDecoder": true, + "ProcInst": true, + "StartElement": true, + "SyntaxError": true, + "TagPathError": true, + "Token": true, + "TokenReader": true, + "Unmarshal": true, + "UnmarshalError": true, + "Unmarshaler": true, + "UnmarshalerAttr": true, + "UnsupportedTypeError": true, + }, + "errors": map[string]bool{ + "As": true, + "Is": true, + "New": true, + "Unwrap": true, + }, + "expvar": map[string]bool{ + "Do": true, + "Float": true, + "Func": true, + "Get": true, + "Handler": true, + "Int": true, + "KeyValue": true, + "Map": true, + "NewFloat": true, + "NewInt": true, + "NewMap": true, + "NewString": true, + "Publish": true, + "String": true, + "Var": true, + }, + "flag": map[string]bool{ + "Arg": true, + "Args": true, + "Bool": true, + "BoolVar": true, + "CommandLine": true, + "ContinueOnError": true, + "Duration": true, + "DurationVar": true, + "ErrHelp": true, + "ErrorHandling": true, + "ExitOnError": true, + "Flag": true, + "FlagSet": true, + "Float64": true, + "Float64Var": true, + "Getter": true, + "Int": true, + "Int64": true, + "Int64Var": true, + "IntVar": true, + "Lookup": true, + "NArg": true, + "NFlag": true, + "NewFlagSet": true, + "PanicOnError": true, + "Parse": true, + "Parsed": true, + "PrintDefaults": true, + "Set": true, + "String": true, + "StringVar": true, + "Uint": true, + "Uint64": true, + "Uint64Var": true, + "UintVar": true, + "UnquoteUsage": true, + "Usage": true, + "Value": true, + "Var": true, + "Visit": true, + "VisitAll": true, + }, + "fmt": map[string]bool{ + "Errorf": true, + "Formatter": true, + "Fprint": true, + "Fprintf": true, + "Fprintln": true, + "Fscan": true, + "Fscanf": true, + "Fscanln": true, + "GoStringer": true, + "Print": true, + "Printf": true, + "Println": true, + "Scan": true, + "ScanState": true, + "Scanf": true, + "Scanln": true, + "Scanner": true, + "Sprint": true, + "Sprintf": true, + "Sprintln": true, + "Sscan": true, + "Sscanf": true, + "Sscanln": true, + "State": true, + "Stringer": true, + }, + "go/ast": map[string]bool{ + "ArrayType": true, + "AssignStmt": true, + "Bad": true, + "BadDecl": true, + "BadExpr": true, + "BadStmt": true, + "BasicLit": true, + "BinaryExpr": true, + "BlockStmt": true, + "BranchStmt": true, + "CallExpr": true, + "CaseClause": true, + "ChanDir": true, + "ChanType": true, + "CommClause": true, + "Comment": true, + "CommentGroup": true, + "CommentMap": true, + "CompositeLit": true, + "Con": true, + "Decl": true, + 
"DeclStmt": true, + "DeferStmt": true, + "Ellipsis": true, + "EmptyStmt": true, + "Expr": true, + "ExprStmt": true, + "Field": true, + "FieldFilter": true, + "FieldList": true, + "File": true, + "FileExports": true, + "Filter": true, + "FilterDecl": true, + "FilterFile": true, + "FilterFuncDuplicates": true, + "FilterImportDuplicates": true, + "FilterPackage": true, + "FilterUnassociatedComments": true, + "ForStmt": true, + "Fprint": true, + "Fun": true, + "FuncDecl": true, + "FuncLit": true, + "FuncType": true, + "GenDecl": true, + "GoStmt": true, + "Ident": true, + "IfStmt": true, + "ImportSpec": true, + "Importer": true, + "IncDecStmt": true, + "IndexExpr": true, + "Inspect": true, + "InterfaceType": true, + "IsExported": true, + "KeyValueExpr": true, + "LabeledStmt": true, + "Lbl": true, + "MapType": true, + "MergeMode": true, + "MergePackageFiles": true, + "NewCommentMap": true, + "NewIdent": true, + "NewObj": true, + "NewPackage": true, + "NewScope": true, + "Node": true, + "NotNilFilter": true, + "ObjKind": true, + "Object": true, + "Package": true, + "PackageExports": true, + "ParenExpr": true, + "Pkg": true, + "Print": true, + "RECV": true, + "RangeStmt": true, + "ReturnStmt": true, + "SEND": true, + "Scope": true, + "SelectStmt": true, + "SelectorExpr": true, + "SendStmt": true, + "SliceExpr": true, + "SortImports": true, + "Spec": true, + "StarExpr": true, + "Stmt": true, + "StructType": true, + "SwitchStmt": true, + "Typ": true, + "TypeAssertExpr": true, + "TypeSpec": true, + "TypeSwitchStmt": true, + "UnaryExpr": true, + "ValueSpec": true, + "Var": true, + "Visitor": true, + "Walk": true, + }, + "go/build": map[string]bool{ + "AllowBinary": true, + "ArchChar": true, + "Context": true, + "Default": true, + "FindOnly": true, + "IgnoreVendor": true, + "Import": true, + "ImportComment": true, + "ImportDir": true, + "ImportMode": true, + "IsLocalImport": true, + "MultiplePackageError": true, + "NoGoError": true, + "Package": true, + "ToolDir": true, + }, + "go/constant": map[string]bool{ + "BinaryOp": true, + "BitLen": true, + "Bool": true, + "BoolVal": true, + "Bytes": true, + "Compare": true, + "Complex": true, + "Denom": true, + "Float": true, + "Float32Val": true, + "Float64Val": true, + "Imag": true, + "Int": true, + "Int64Val": true, + "Kind": true, + "Make": true, + "MakeBool": true, + "MakeFloat64": true, + "MakeFromBytes": true, + "MakeFromLiteral": true, + "MakeImag": true, + "MakeInt64": true, + "MakeString": true, + "MakeUint64": true, + "MakeUnknown": true, + "Num": true, + "Real": true, + "Shift": true, + "Sign": true, + "String": true, + "StringVal": true, + "ToComplex": true, + "ToFloat": true, + "ToInt": true, + "Uint64Val": true, + "UnaryOp": true, + "Unknown": true, + "Val": true, + "Value": true, + }, + "go/doc": map[string]bool{ + "AllDecls": true, + "AllMethods": true, + "Example": true, + "Examples": true, + "Filter": true, + "Func": true, + "IllegalPrefixes": true, + "IsPredeclared": true, + "Mode": true, + "New": true, + "Note": true, + "Package": true, + "PreserveAST": true, + "Synopsis": true, + "ToHTML": true, + "ToText": true, + "Type": true, + "Value": true, + }, + "go/format": map[string]bool{ + "Node": true, + "Source": true, + }, + "go/importer": map[string]bool{ + "Default": true, + "For": true, + "ForCompiler": true, + "Lookup": true, + }, + "go/parser": map[string]bool{ + "AllErrors": true, + "DeclarationErrors": true, + "ImportsOnly": true, + "Mode": true, + "PackageClauseOnly": true, + "ParseComments": true, + "ParseDir": true, + "ParseExpr": 
true, + "ParseExprFrom": true, + "ParseFile": true, + "SpuriousErrors": true, + "Trace": true, + }, + "go/printer": map[string]bool{ + "CommentedNode": true, + "Config": true, + "Fprint": true, + "Mode": true, + "RawFormat": true, + "SourcePos": true, + "TabIndent": true, + "UseSpaces": true, + }, + "go/scanner": map[string]bool{ + "Error": true, + "ErrorHandler": true, + "ErrorList": true, + "Mode": true, + "PrintError": true, + "ScanComments": true, + "Scanner": true, + }, + "go/token": map[string]bool{ + "ADD": true, + "ADD_ASSIGN": true, + "AND": true, + "AND_ASSIGN": true, + "AND_NOT": true, + "AND_NOT_ASSIGN": true, + "ARROW": true, + "ASSIGN": true, + "BREAK": true, + "CASE": true, + "CHAN": true, + "CHAR": true, + "COLON": true, + "COMMA": true, + "COMMENT": true, + "CONST": true, + "CONTINUE": true, + "DEC": true, + "DEFAULT": true, + "DEFER": true, + "DEFINE": true, + "ELLIPSIS": true, + "ELSE": true, + "EOF": true, + "EQL": true, + "FALLTHROUGH": true, + "FLOAT": true, + "FOR": true, + "FUNC": true, + "File": true, + "FileSet": true, + "GEQ": true, + "GO": true, + "GOTO": true, + "GTR": true, + "HighestPrec": true, + "IDENT": true, + "IF": true, + "ILLEGAL": true, + "IMAG": true, + "IMPORT": true, + "INC": true, + "INT": true, + "INTERFACE": true, + "IsExported": true, + "IsIdentifier": true, + "IsKeyword": true, + "LAND": true, + "LBRACE": true, + "LBRACK": true, + "LEQ": true, + "LOR": true, + "LPAREN": true, + "LSS": true, + "Lookup": true, + "LowestPrec": true, + "MAP": true, + "MUL": true, + "MUL_ASSIGN": true, + "NEQ": true, + "NOT": true, + "NewFileSet": true, + "NoPos": true, + "OR": true, + "OR_ASSIGN": true, + "PACKAGE": true, + "PERIOD": true, + "Pos": true, + "Position": true, + "QUO": true, + "QUO_ASSIGN": true, + "RANGE": true, + "RBRACE": true, + "RBRACK": true, + "REM": true, + "REM_ASSIGN": true, + "RETURN": true, + "RPAREN": true, + "SELECT": true, + "SEMICOLON": true, + "SHL": true, + "SHL_ASSIGN": true, + "SHR": true, + "SHR_ASSIGN": true, + "STRING": true, + "STRUCT": true, + "SUB": true, + "SUB_ASSIGN": true, + "SWITCH": true, + "TYPE": true, + "Token": true, + "UnaryPrec": true, + "VAR": true, + "XOR": true, + "XOR_ASSIGN": true, + }, + "go/types": map[string]bool{ + "Array": true, + "AssertableTo": true, + "AssignableTo": true, + "Basic": true, + "BasicInfo": true, + "BasicKind": true, + "Bool": true, + "Builtin": true, + "Byte": true, + "Chan": true, + "ChanDir": true, + "CheckExpr": true, + "Checker": true, + "Comparable": true, + "Complex128": true, + "Complex64": true, + "Config": true, + "Const": true, + "ConvertibleTo": true, + "DefPredeclaredTestFuncs": true, + "Default": true, + "Error": true, + "Eval": true, + "ExprString": true, + "FieldVal": true, + "Float32": true, + "Float64": true, + "Func": true, + "Id": true, + "Identical": true, + "IdenticalIgnoreTags": true, + "Implements": true, + "ImportMode": true, + "Importer": true, + "ImporterFrom": true, + "Info": true, + "Initializer": true, + "Int": true, + "Int16": true, + "Int32": true, + "Int64": true, + "Int8": true, + "Interface": true, + "Invalid": true, + "IsBoolean": true, + "IsComplex": true, + "IsConstType": true, + "IsFloat": true, + "IsInteger": true, + "IsInterface": true, + "IsNumeric": true, + "IsOrdered": true, + "IsString": true, + "IsUnsigned": true, + "IsUntyped": true, + "Label": true, + "LookupFieldOrMethod": true, + "Map": true, + "MethodExpr": true, + "MethodSet": true, + "MethodVal": true, + "MissingMethod": true, + "Named": true, + "NewArray": true, + "NewChan": true, + 
"NewChecker": true, + "NewConst": true, + "NewField": true, + "NewFunc": true, + "NewInterface": true, + "NewInterfaceType": true, + "NewLabel": true, + "NewMap": true, + "NewMethodSet": true, + "NewNamed": true, + "NewPackage": true, + "NewParam": true, + "NewPkgName": true, + "NewPointer": true, + "NewScope": true, + "NewSignature": true, + "NewSlice": true, + "NewStruct": true, + "NewTuple": true, + "NewTypeName": true, + "NewVar": true, + "Nil": true, + "Object": true, + "ObjectString": true, + "Package": true, + "PkgName": true, + "Pointer": true, + "Qualifier": true, + "RecvOnly": true, + "RelativeTo": true, + "Rune": true, + "Scope": true, + "Selection": true, + "SelectionKind": true, + "SelectionString": true, + "SendOnly": true, + "SendRecv": true, + "Signature": true, + "Sizes": true, + "SizesFor": true, + "Slice": true, + "StdSizes": true, + "String": true, + "Struct": true, + "Tuple": true, + "Typ": true, + "Type": true, + "TypeAndValue": true, + "TypeName": true, + "TypeString": true, + "Uint": true, + "Uint16": true, + "Uint32": true, + "Uint64": true, + "Uint8": true, + "Uintptr": true, + "Universe": true, + "Unsafe": true, + "UnsafePointer": true, + "UntypedBool": true, + "UntypedComplex": true, + "UntypedFloat": true, + "UntypedInt": true, + "UntypedNil": true, + "UntypedRune": true, + "UntypedString": true, + "Var": true, + "WriteExpr": true, + "WriteSignature": true, + "WriteType": true, + }, + "hash": map[string]bool{ + "Hash": true, + "Hash32": true, + "Hash64": true, + }, + "hash/adler32": map[string]bool{ + "Checksum": true, + "New": true, + "Size": true, + }, + "hash/crc32": map[string]bool{ + "Castagnoli": true, + "Checksum": true, + "ChecksumIEEE": true, + "IEEE": true, + "IEEETable": true, + "Koopman": true, + "MakeTable": true, + "New": true, + "NewIEEE": true, + "Size": true, + "Table": true, + "Update": true, + }, + "hash/crc64": map[string]bool{ + "Checksum": true, + "ECMA": true, + "ISO": true, + "MakeTable": true, + "New": true, + "Size": true, + "Table": true, + "Update": true, + }, + "hash/fnv": map[string]bool{ + "New128": true, + "New128a": true, + "New32": true, + "New32a": true, + "New64": true, + "New64a": true, + }, + "html": map[string]bool{ + "EscapeString": true, + "UnescapeString": true, + }, + "html/template": map[string]bool{ + "CSS": true, + "ErrAmbigContext": true, + "ErrBadHTML": true, + "ErrBranchEnd": true, + "ErrEndContext": true, + "ErrNoSuchTemplate": true, + "ErrOutputContext": true, + "ErrPartialCharset": true, + "ErrPartialEscape": true, + "ErrPredefinedEscaper": true, + "ErrRangeLoopReentry": true, + "ErrSlashAmbig": true, + "Error": true, + "ErrorCode": true, + "FuncMap": true, + "HTML": true, + "HTMLAttr": true, + "HTMLEscape": true, + "HTMLEscapeString": true, + "HTMLEscaper": true, + "IsTrue": true, + "JS": true, + "JSEscape": true, + "JSEscapeString": true, + "JSEscaper": true, + "JSStr": true, + "Must": true, + "New": true, + "OK": true, + "ParseFiles": true, + "ParseGlob": true, + "Srcset": true, + "Template": true, + "URL": true, + "URLQueryEscaper": true, + }, + "image": map[string]bool{ + "Alpha": true, + "Alpha16": true, + "Black": true, + "CMYK": true, + "Config": true, + "Decode": true, + "DecodeConfig": true, + "ErrFormat": true, + "Gray": true, + "Gray16": true, + "Image": true, + "NRGBA": true, + "NRGBA64": true, + "NYCbCrA": true, + "NewAlpha": true, + "NewAlpha16": true, + "NewCMYK": true, + "NewGray": true, + "NewGray16": true, + "NewNRGBA": true, + "NewNRGBA64": true, + "NewNYCbCrA": true, + "NewPaletted": true, 
+ "NewRGBA": true, + "NewRGBA64": true, + "NewUniform": true, + "NewYCbCr": true, + "Opaque": true, + "Paletted": true, + "PalettedImage": true, + "Point": true, + "Pt": true, + "RGBA": true, + "RGBA64": true, + "Rect": true, + "Rectangle": true, + "RegisterFormat": true, + "Transparent": true, + "Uniform": true, + "White": true, + "YCbCr": true, + "YCbCrSubsampleRatio": true, + "YCbCrSubsampleRatio410": true, + "YCbCrSubsampleRatio411": true, + "YCbCrSubsampleRatio420": true, + "YCbCrSubsampleRatio422": true, + "YCbCrSubsampleRatio440": true, + "YCbCrSubsampleRatio444": true, + "ZP": true, + "ZR": true, + }, + "image/color": map[string]bool{ + "Alpha": true, + "Alpha16": true, + "Alpha16Model": true, + "AlphaModel": true, + "Black": true, + "CMYK": true, + "CMYKModel": true, + "CMYKToRGB": true, + "Color": true, + "Gray": true, + "Gray16": true, + "Gray16Model": true, + "GrayModel": true, + "Model": true, + "ModelFunc": true, + "NRGBA": true, + "NRGBA64": true, + "NRGBA64Model": true, + "NRGBAModel": true, + "NYCbCrA": true, + "NYCbCrAModel": true, + "Opaque": true, + "Palette": true, + "RGBA": true, + "RGBA64": true, + "RGBA64Model": true, + "RGBAModel": true, + "RGBToCMYK": true, + "RGBToYCbCr": true, + "Transparent": true, + "White": true, + "YCbCr": true, + "YCbCrModel": true, + "YCbCrToRGB": true, + }, + "image/color/palette": map[string]bool{ + "Plan9": true, + "WebSafe": true, + }, + "image/draw": map[string]bool{ + "Draw": true, + "DrawMask": true, + "Drawer": true, + "FloydSteinberg": true, + "Image": true, + "Op": true, + "Over": true, + "Quantizer": true, + "Src": true, + }, + "image/gif": map[string]bool{ + "Decode": true, + "DecodeAll": true, + "DecodeConfig": true, + "DisposalBackground": true, + "DisposalNone": true, + "DisposalPrevious": true, + "Encode": true, + "EncodeAll": true, + "GIF": true, + "Options": true, + }, + "image/jpeg": map[string]bool{ + "Decode": true, + "DecodeConfig": true, + "DefaultQuality": true, + "Encode": true, + "FormatError": true, + "Options": true, + "Reader": true, + "UnsupportedError": true, + }, + "image/png": map[string]bool{ + "BestCompression": true, + "BestSpeed": true, + "CompressionLevel": true, + "Decode": true, + "DecodeConfig": true, + "DefaultCompression": true, + "Encode": true, + "Encoder": true, + "EncoderBuffer": true, + "EncoderBufferPool": true, + "FormatError": true, + "NoCompression": true, + "UnsupportedError": true, + }, + "index/suffixarray": map[string]bool{ + "Index": true, + "New": true, + }, + "io": map[string]bool{ + "ByteReader": true, + "ByteScanner": true, + "ByteWriter": true, + "Closer": true, + "Copy": true, + "CopyBuffer": true, + "CopyN": true, + "EOF": true, + "ErrClosedPipe": true, + "ErrNoProgress": true, + "ErrShortBuffer": true, + "ErrShortWrite": true, + "ErrUnexpectedEOF": true, + "LimitReader": true, + "LimitedReader": true, + "MultiReader": true, + "MultiWriter": true, + "NewSectionReader": true, + "Pipe": true, + "PipeReader": true, + "PipeWriter": true, + "ReadAtLeast": true, + "ReadCloser": true, + "ReadFull": true, + "ReadSeeker": true, + "ReadWriteCloser": true, + "ReadWriteSeeker": true, + "ReadWriter": true, + "Reader": true, + "ReaderAt": true, + "ReaderFrom": true, + "RuneReader": true, + "RuneScanner": true, + "SectionReader": true, + "SeekCurrent": true, + "SeekEnd": true, + "SeekStart": true, + "Seeker": true, + "StringWriter": true, + "TeeReader": true, + "WriteCloser": true, + "WriteSeeker": true, + "WriteString": true, + "Writer": true, + "WriterAt": true, + "WriterTo": true, + }, + 
"io/ioutil": map[string]bool{ + "Discard": true, + "NopCloser": true, + "ReadAll": true, + "ReadDir": true, + "ReadFile": true, + "TempDir": true, + "TempFile": true, + "WriteFile": true, + }, + "log": map[string]bool{ + "Fatal": true, + "Fatalf": true, + "Fatalln": true, + "Flags": true, + "LUTC": true, + "Ldate": true, + "Llongfile": true, + "Lmicroseconds": true, + "Logger": true, + "Lshortfile": true, + "LstdFlags": true, + "Ltime": true, + "New": true, + "Output": true, + "Panic": true, + "Panicf": true, + "Panicln": true, + "Prefix": true, + "Print": true, + "Printf": true, + "Println": true, + "SetFlags": true, + "SetOutput": true, + "SetPrefix": true, + "Writer": true, + }, + "log/syslog": map[string]bool{ + "Dial": true, + "LOG_ALERT": true, + "LOG_AUTH": true, + "LOG_AUTHPRIV": true, + "LOG_CRIT": true, + "LOG_CRON": true, + "LOG_DAEMON": true, + "LOG_DEBUG": true, + "LOG_EMERG": true, + "LOG_ERR": true, + "LOG_FTP": true, + "LOG_INFO": true, + "LOG_KERN": true, + "LOG_LOCAL0": true, + "LOG_LOCAL1": true, + "LOG_LOCAL2": true, + "LOG_LOCAL3": true, + "LOG_LOCAL4": true, + "LOG_LOCAL5": true, + "LOG_LOCAL6": true, + "LOG_LOCAL7": true, + "LOG_LPR": true, + "LOG_MAIL": true, + "LOG_NEWS": true, + "LOG_NOTICE": true, + "LOG_SYSLOG": true, + "LOG_USER": true, + "LOG_UUCP": true, + "LOG_WARNING": true, + "New": true, + "NewLogger": true, + "Priority": true, + "Writer": true, + }, + "math": map[string]bool{ + "Abs": true, + "Acos": true, + "Acosh": true, + "Asin": true, + "Asinh": true, + "Atan": true, + "Atan2": true, + "Atanh": true, + "Cbrt": true, + "Ceil": true, + "Copysign": true, + "Cos": true, + "Cosh": true, + "Dim": true, + "E": true, + "Erf": true, + "Erfc": true, + "Erfcinv": true, + "Erfinv": true, + "Exp": true, + "Exp2": true, + "Expm1": true, + "Float32bits": true, + "Float32frombits": true, + "Float64bits": true, + "Float64frombits": true, + "Floor": true, + "Frexp": true, + "Gamma": true, + "Hypot": true, + "Ilogb": true, + "Inf": true, + "IsInf": true, + "IsNaN": true, + "J0": true, + "J1": true, + "Jn": true, + "Ldexp": true, + "Lgamma": true, + "Ln10": true, + "Ln2": true, + "Log": true, + "Log10": true, + "Log10E": true, + "Log1p": true, + "Log2": true, + "Log2E": true, + "Logb": true, + "Max": true, + "MaxFloat32": true, + "MaxFloat64": true, + "MaxInt16": true, + "MaxInt32": true, + "MaxInt64": true, + "MaxInt8": true, + "MaxUint16": true, + "MaxUint32": true, + "MaxUint64": true, + "MaxUint8": true, + "Min": true, + "MinInt16": true, + "MinInt32": true, + "MinInt64": true, + "MinInt8": true, + "Mod": true, + "Modf": true, + "NaN": true, + "Nextafter": true, + "Nextafter32": true, + "Phi": true, + "Pi": true, + "Pow": true, + "Pow10": true, + "Remainder": true, + "Round": true, + "RoundToEven": true, + "Signbit": true, + "Sin": true, + "Sincos": true, + "Sinh": true, + "SmallestNonzeroFloat32": true, + "SmallestNonzeroFloat64": true, + "Sqrt": true, + "Sqrt2": true, + "SqrtE": true, + "SqrtPhi": true, + "SqrtPi": true, + "Tan": true, + "Tanh": true, + "Trunc": true, + "Y0": true, + "Y1": true, + "Yn": true, + }, + "math/big": map[string]bool{ + "Above": true, + "Accuracy": true, + "AwayFromZero": true, + "Below": true, + "ErrNaN": true, + "Exact": true, + "Float": true, + "Int": true, + "Jacobi": true, + "MaxBase": true, + "MaxExp": true, + "MaxPrec": true, + "MinExp": true, + "NewFloat": true, + "NewInt": true, + "NewRat": true, + "ParseFloat": true, + "Rat": true, + "RoundingMode": true, + "ToNearestAway": true, + "ToNearestEven": true, + "ToNegativeInf": 
true, + "ToPositiveInf": true, + "ToZero": true, + "Word": true, + }, + "math/bits": map[string]bool{ + "Add": true, + "Add32": true, + "Add64": true, + "Div": true, + "Div32": true, + "Div64": true, + "LeadingZeros": true, + "LeadingZeros16": true, + "LeadingZeros32": true, + "LeadingZeros64": true, + "LeadingZeros8": true, + "Len": true, + "Len16": true, + "Len32": true, + "Len64": true, + "Len8": true, + "Mul": true, + "Mul32": true, + "Mul64": true, + "OnesCount": true, + "OnesCount16": true, + "OnesCount32": true, + "OnesCount64": true, + "OnesCount8": true, + "Reverse": true, + "Reverse16": true, + "Reverse32": true, + "Reverse64": true, + "Reverse8": true, + "ReverseBytes": true, + "ReverseBytes16": true, + "ReverseBytes32": true, + "ReverseBytes64": true, + "RotateLeft": true, + "RotateLeft16": true, + "RotateLeft32": true, + "RotateLeft64": true, + "RotateLeft8": true, + "Sub": true, + "Sub32": true, + "Sub64": true, + "TrailingZeros": true, + "TrailingZeros16": true, + "TrailingZeros32": true, + "TrailingZeros64": true, + "TrailingZeros8": true, + "UintSize": true, + }, + "math/cmplx": map[string]bool{ + "Abs": true, + "Acos": true, + "Acosh": true, + "Asin": true, + "Asinh": true, + "Atan": true, + "Atanh": true, + "Conj": true, + "Cos": true, + "Cosh": true, + "Cot": true, + "Exp": true, + "Inf": true, + "IsInf": true, + "IsNaN": true, + "Log": true, + "Log10": true, + "NaN": true, + "Phase": true, + "Polar": true, + "Pow": true, + "Rect": true, + "Sin": true, + "Sinh": true, + "Sqrt": true, + "Tan": true, + "Tanh": true, + }, + "math/rand": map[string]bool{ + "ExpFloat64": true, + "Float32": true, + "Float64": true, + "Int": true, + "Int31": true, + "Int31n": true, + "Int63": true, + "Int63n": true, + "Intn": true, + "New": true, + "NewSource": true, + "NewZipf": true, + "NormFloat64": true, + "Perm": true, + "Rand": true, + "Read": true, + "Seed": true, + "Shuffle": true, + "Source": true, + "Source64": true, + "Uint32": true, + "Uint64": true, + "Zipf": true, + }, + "mime": map[string]bool{ + "AddExtensionType": true, + "BEncoding": true, + "ErrInvalidMediaParameter": true, + "ExtensionsByType": true, + "FormatMediaType": true, + "ParseMediaType": true, + "QEncoding": true, + "TypeByExtension": true, + "WordDecoder": true, + "WordEncoder": true, + }, + "mime/multipart": map[string]bool{ + "ErrMessageTooLarge": true, + "File": true, + "FileHeader": true, + "Form": true, + "NewReader": true, + "NewWriter": true, + "Part": true, + "Reader": true, + "Writer": true, + }, + "mime/quotedprintable": map[string]bool{ + "NewReader": true, + "NewWriter": true, + "Reader": true, + "Writer": true, + }, + "net": map[string]bool{ + "Addr": true, + "AddrError": true, + "Buffers": true, + "CIDRMask": true, + "Conn": true, + "DNSConfigError": true, + "DNSError": true, + "DefaultResolver": true, + "Dial": true, + "DialIP": true, + "DialTCP": true, + "DialTimeout": true, + "DialUDP": true, + "DialUnix": true, + "Dialer": true, + "ErrWriteToConnected": true, + "Error": true, + "FileConn": true, + "FileListener": true, + "FilePacketConn": true, + "FlagBroadcast": true, + "FlagLoopback": true, + "FlagMulticast": true, + "FlagPointToPoint": true, + "FlagUp": true, + "Flags": true, + "HardwareAddr": true, + "IP": true, + "IPAddr": true, + "IPConn": true, + "IPMask": true, + "IPNet": true, + "IPv4": true, + "IPv4Mask": true, + "IPv4allrouter": true, + "IPv4allsys": true, + "IPv4bcast": true, + "IPv4len": true, + "IPv4zero": true, + "IPv6interfacelocalallnodes": true, + "IPv6len": true, + 
"IPv6linklocalallnodes": true, + "IPv6linklocalallrouters": true, + "IPv6loopback": true, + "IPv6unspecified": true, + "IPv6zero": true, + "Interface": true, + "InterfaceAddrs": true, + "InterfaceByIndex": true, + "InterfaceByName": true, + "Interfaces": true, + "InvalidAddrError": true, + "JoinHostPort": true, + "Listen": true, + "ListenConfig": true, + "ListenIP": true, + "ListenMulticastUDP": true, + "ListenPacket": true, + "ListenTCP": true, + "ListenUDP": true, + "ListenUnix": true, + "ListenUnixgram": true, + "Listener": true, + "LookupAddr": true, + "LookupCNAME": true, + "LookupHost": true, + "LookupIP": true, + "LookupMX": true, + "LookupNS": true, + "LookupPort": true, + "LookupSRV": true, + "LookupTXT": true, + "MX": true, + "NS": true, + "OpError": true, + "PacketConn": true, + "ParseCIDR": true, + "ParseError": true, + "ParseIP": true, + "ParseMAC": true, + "Pipe": true, + "ResolveIPAddr": true, + "ResolveTCPAddr": true, + "ResolveUDPAddr": true, + "ResolveUnixAddr": true, + "Resolver": true, + "SRV": true, + "SplitHostPort": true, + "TCPAddr": true, + "TCPConn": true, + "TCPListener": true, + "UDPAddr": true, + "UDPConn": true, + "UnixAddr": true, + "UnixConn": true, + "UnixListener": true, + "UnknownNetworkError": true, + }, + "net/http": map[string]bool{ + "CanonicalHeaderKey": true, + "Client": true, + "CloseNotifier": true, + "ConnState": true, + "Cookie": true, + "CookieJar": true, + "DefaultClient": true, + "DefaultMaxHeaderBytes": true, + "DefaultMaxIdleConnsPerHost": true, + "DefaultServeMux": true, + "DefaultTransport": true, + "DetectContentType": true, + "Dir": true, + "ErrAbortHandler": true, + "ErrBodyNotAllowed": true, + "ErrBodyReadAfterClose": true, + "ErrContentLength": true, + "ErrHandlerTimeout": true, + "ErrHeaderTooLong": true, + "ErrHijacked": true, + "ErrLineTooLong": true, + "ErrMissingBoundary": true, + "ErrMissingContentLength": true, + "ErrMissingFile": true, + "ErrNoCookie": true, + "ErrNoLocation": true, + "ErrNotMultipart": true, + "ErrNotSupported": true, + "ErrServerClosed": true, + "ErrShortBody": true, + "ErrSkipAltProtocol": true, + "ErrUnexpectedTrailer": true, + "ErrUseLastResponse": true, + "ErrWriteAfterFlush": true, + "Error": true, + "File": true, + "FileServer": true, + "FileSystem": true, + "Flusher": true, + "Get": true, + "Handle": true, + "HandleFunc": true, + "Handler": true, + "HandlerFunc": true, + "Head": true, + "Header": true, + "Hijacker": true, + "ListenAndServe": true, + "ListenAndServeTLS": true, + "LocalAddrContextKey": true, + "MaxBytesReader": true, + "MethodConnect": true, + "MethodDelete": true, + "MethodGet": true, + "MethodHead": true, + "MethodOptions": true, + "MethodPatch": true, + "MethodPost": true, + "MethodPut": true, + "MethodTrace": true, + "NewFileTransport": true, + "NewRequest": true, + "NewRequestWithContext": true, + "NewServeMux": true, + "NoBody": true, + "NotFound": true, + "NotFoundHandler": true, + "ParseHTTPVersion": true, + "ParseTime": true, + "Post": true, + "PostForm": true, + "ProtocolError": true, + "ProxyFromEnvironment": true, + "ProxyURL": true, + "PushOptions": true, + "Pusher": true, + "ReadRequest": true, + "ReadResponse": true, + "Redirect": true, + "RedirectHandler": true, + "Request": true, + "Response": true, + "ResponseWriter": true, + "RoundTripper": true, + "SameSite": true, + "SameSiteDefaultMode": true, + "SameSiteLaxMode": true, + "SameSiteNoneMode": true, + "SameSiteStrictMode": true, + "Serve": true, + "ServeContent": true, + "ServeFile": true, + "ServeMux": true, + 
"ServeTLS": true, + "Server": true, + "ServerContextKey": true, + "SetCookie": true, + "StateActive": true, + "StateClosed": true, + "StateHijacked": true, + "StateIdle": true, + "StateNew": true, + "StatusAccepted": true, + "StatusAlreadyReported": true, + "StatusBadGateway": true, + "StatusBadRequest": true, + "StatusConflict": true, + "StatusContinue": true, + "StatusCreated": true, + "StatusEarlyHints": true, + "StatusExpectationFailed": true, + "StatusFailedDependency": true, + "StatusForbidden": true, + "StatusFound": true, + "StatusGatewayTimeout": true, + "StatusGone": true, + "StatusHTTPVersionNotSupported": true, + "StatusIMUsed": true, + "StatusInsufficientStorage": true, + "StatusInternalServerError": true, + "StatusLengthRequired": true, + "StatusLocked": true, + "StatusLoopDetected": true, + "StatusMethodNotAllowed": true, + "StatusMisdirectedRequest": true, + "StatusMovedPermanently": true, + "StatusMultiStatus": true, + "StatusMultipleChoices": true, + "StatusNetworkAuthenticationRequired": true, + "StatusNoContent": true, + "StatusNonAuthoritativeInfo": true, + "StatusNotAcceptable": true, + "StatusNotExtended": true, + "StatusNotFound": true, + "StatusNotImplemented": true, + "StatusNotModified": true, + "StatusOK": true, + "StatusPartialContent": true, + "StatusPaymentRequired": true, + "StatusPermanentRedirect": true, + "StatusPreconditionFailed": true, + "StatusPreconditionRequired": true, + "StatusProcessing": true, + "StatusProxyAuthRequired": true, + "StatusRequestEntityTooLarge": true, + "StatusRequestHeaderFieldsTooLarge": true, + "StatusRequestTimeout": true, + "StatusRequestURITooLong": true, + "StatusRequestedRangeNotSatisfiable": true, + "StatusResetContent": true, + "StatusSeeOther": true, + "StatusServiceUnavailable": true, + "StatusSwitchingProtocols": true, + "StatusTeapot": true, + "StatusTemporaryRedirect": true, + "StatusText": true, + "StatusTooEarly": true, + "StatusTooManyRequests": true, + "StatusUnauthorized": true, + "StatusUnavailableForLegalReasons": true, + "StatusUnprocessableEntity": true, + "StatusUnsupportedMediaType": true, + "StatusUpgradeRequired": true, + "StatusUseProxy": true, + "StatusVariantAlsoNegotiates": true, + "StripPrefix": true, + "TimeFormat": true, + "TimeoutHandler": true, + "TrailerPrefix": true, + "Transport": true, + }, + "net/http/cgi": map[string]bool{ + "Handler": true, + "Request": true, + "RequestFromMap": true, + "Serve": true, + }, + "net/http/cookiejar": map[string]bool{ + "Jar": true, + "New": true, + "Options": true, + "PublicSuffixList": true, + }, + "net/http/fcgi": map[string]bool{ + "ErrConnClosed": true, + "ErrRequestAborted": true, + "ProcessEnv": true, + "Serve": true, + }, + "net/http/httptest": map[string]bool{ + "DefaultRemoteAddr": true, + "NewRecorder": true, + "NewRequest": true, + "NewServer": true, + "NewTLSServer": true, + "NewUnstartedServer": true, + "ResponseRecorder": true, + "Server": true, + }, + "net/http/httptrace": map[string]bool{ + "ClientTrace": true, + "ContextClientTrace": true, + "DNSDoneInfo": true, + "DNSStartInfo": true, + "GotConnInfo": true, + "WithClientTrace": true, + "WroteRequestInfo": true, + }, + "net/http/httputil": map[string]bool{ + "BufferPool": true, + "ClientConn": true, + "DumpRequest": true, + "DumpRequestOut": true, + "DumpResponse": true, + "ErrClosed": true, + "ErrLineTooLong": true, + "ErrPersistEOF": true, + "ErrPipeline": true, + "NewChunkedReader": true, + "NewChunkedWriter": true, + "NewClientConn": true, + "NewProxyClientConn": true, + "NewServerConn": 
true, + "NewSingleHostReverseProxy": true, + "ReverseProxy": true, + "ServerConn": true, + }, + "net/http/pprof": map[string]bool{ + "Cmdline": true, + "Handler": true, + "Index": true, + "Profile": true, + "Symbol": true, + "Trace": true, + }, + "net/mail": map[string]bool{ + "Address": true, + "AddressParser": true, + "ErrHeaderNotPresent": true, + "Header": true, + "Message": true, + "ParseAddress": true, + "ParseAddressList": true, + "ParseDate": true, + "ReadMessage": true, + }, + "net/rpc": map[string]bool{ + "Accept": true, + "Call": true, + "Client": true, + "ClientCodec": true, + "DefaultDebugPath": true, + "DefaultRPCPath": true, + "DefaultServer": true, + "Dial": true, + "DialHTTP": true, + "DialHTTPPath": true, + "ErrShutdown": true, + "HandleHTTP": true, + "NewClient": true, + "NewClientWithCodec": true, + "NewServer": true, + "Register": true, + "RegisterName": true, + "Request": true, + "Response": true, + "ServeCodec": true, + "ServeConn": true, + "ServeRequest": true, + "Server": true, + "ServerCodec": true, + "ServerError": true, + }, + "net/rpc/jsonrpc": map[string]bool{ + "Dial": true, + "NewClient": true, + "NewClientCodec": true, + "NewServerCodec": true, + "ServeConn": true, + }, + "net/smtp": map[string]bool{ + "Auth": true, + "CRAMMD5Auth": true, + "Client": true, + "Dial": true, + "NewClient": true, + "PlainAuth": true, + "SendMail": true, + "ServerInfo": true, + }, + "net/textproto": map[string]bool{ + "CanonicalMIMEHeaderKey": true, + "Conn": true, + "Dial": true, + "Error": true, + "MIMEHeader": true, + "NewConn": true, + "NewReader": true, + "NewWriter": true, + "Pipeline": true, + "ProtocolError": true, + "Reader": true, + "TrimBytes": true, + "TrimString": true, + "Writer": true, + }, + "net/url": map[string]bool{ + "Error": true, + "EscapeError": true, + "InvalidHostError": true, + "Parse": true, + "ParseQuery": true, + "ParseRequestURI": true, + "PathEscape": true, + "PathUnescape": true, + "QueryEscape": true, + "QueryUnescape": true, + "URL": true, + "User": true, + "UserPassword": true, + "Userinfo": true, + "Values": true, + }, + "os": map[string]bool{ + "Args": true, + "Chdir": true, + "Chmod": true, + "Chown": true, + "Chtimes": true, + "Clearenv": true, + "Create": true, + "DevNull": true, + "Environ": true, + "ErrClosed": true, + "ErrExist": true, + "ErrInvalid": true, + "ErrNoDeadline": true, + "ErrNotExist": true, + "ErrPermission": true, + "Executable": true, + "Exit": true, + "Expand": true, + "ExpandEnv": true, + "File": true, + "FileInfo": true, + "FileMode": true, + "FindProcess": true, + "Getegid": true, + "Getenv": true, + "Geteuid": true, + "Getgid": true, + "Getgroups": true, + "Getpagesize": true, + "Getpid": true, + "Getppid": true, + "Getuid": true, + "Getwd": true, + "Hostname": true, + "Interrupt": true, + "IsExist": true, + "IsNotExist": true, + "IsPathSeparator": true, + "IsPermission": true, + "IsTimeout": true, + "Kill": true, + "Lchown": true, + "Link": true, + "LinkError": true, + "LookupEnv": true, + "Lstat": true, + "Mkdir": true, + "MkdirAll": true, + "ModeAppend": true, + "ModeCharDevice": true, + "ModeDevice": true, + "ModeDir": true, + "ModeExclusive": true, + "ModeIrregular": true, + "ModeNamedPipe": true, + "ModePerm": true, + "ModeSetgid": true, + "ModeSetuid": true, + "ModeSocket": true, + "ModeSticky": true, + "ModeSymlink": true, + "ModeTemporary": true, + "ModeType": true, + "NewFile": true, + "NewSyscallError": true, + "O_APPEND": true, + "O_CREATE": true, + "O_EXCL": true, + "O_RDONLY": true, + "O_RDWR": true, + 
"O_SYNC": true, + "O_TRUNC": true, + "O_WRONLY": true, + "Open": true, + "OpenFile": true, + "PathError": true, + "PathListSeparator": true, + "PathSeparator": true, + "Pipe": true, + "ProcAttr": true, + "Process": true, + "ProcessState": true, + "Readlink": true, + "Remove": true, + "RemoveAll": true, + "Rename": true, + "SEEK_CUR": true, + "SEEK_END": true, + "SEEK_SET": true, + "SameFile": true, + "Setenv": true, + "Signal": true, + "StartProcess": true, + "Stat": true, + "Stderr": true, + "Stdin": true, + "Stdout": true, + "Symlink": true, + "SyscallError": true, + "TempDir": true, + "Truncate": true, + "Unsetenv": true, + "UserCacheDir": true, + "UserConfigDir": true, + "UserHomeDir": true, + }, + "os/exec": map[string]bool{ + "Cmd": true, + "Command": true, + "CommandContext": true, + "ErrNotFound": true, + "Error": true, + "ExitError": true, + "LookPath": true, + }, + "os/signal": map[string]bool{ + "Ignore": true, + "Ignored": true, + "Notify": true, + "Reset": true, + "Stop": true, + }, + "os/user": map[string]bool{ + "Current": true, + "Group": true, + "Lookup": true, + "LookupGroup": true, + "LookupGroupId": true, + "LookupId": true, + "UnknownGroupError": true, + "UnknownGroupIdError": true, + "UnknownUserError": true, + "UnknownUserIdError": true, + "User": true, + }, + "path": map[string]bool{ + "Base": true, + "Clean": true, + "Dir": true, + "ErrBadPattern": true, + "Ext": true, + "IsAbs": true, + "Join": true, + "Match": true, + "Split": true, + }, + "path/filepath": map[string]bool{ + "Abs": true, + "Base": true, + "Clean": true, + "Dir": true, + "ErrBadPattern": true, + "EvalSymlinks": true, + "Ext": true, + "FromSlash": true, + "Glob": true, + "HasPrefix": true, + "IsAbs": true, + "Join": true, + "ListSeparator": true, + "Match": true, + "Rel": true, + "Separator": true, + "SkipDir": true, + "Split": true, + "SplitList": true, + "ToSlash": true, + "VolumeName": true, + "Walk": true, + "WalkFunc": true, + }, + "plugin": map[string]bool{ + "Open": true, + "Plugin": true, + "Symbol": true, + }, + "reflect": map[string]bool{ + "Append": true, + "AppendSlice": true, + "Array": true, + "ArrayOf": true, + "Bool": true, + "BothDir": true, + "Chan": true, + "ChanDir": true, + "ChanOf": true, + "Complex128": true, + "Complex64": true, + "Copy": true, + "DeepEqual": true, + "Float32": true, + "Float64": true, + "Func": true, + "FuncOf": true, + "Indirect": true, + "Int": true, + "Int16": true, + "Int32": true, + "Int64": true, + "Int8": true, + "Interface": true, + "Invalid": true, + "Kind": true, + "MakeChan": true, + "MakeFunc": true, + "MakeMap": true, + "MakeMapWithSize": true, + "MakeSlice": true, + "Map": true, + "MapIter": true, + "MapOf": true, + "Method": true, + "New": true, + "NewAt": true, + "Ptr": true, + "PtrTo": true, + "RecvDir": true, + "Select": true, + "SelectCase": true, + "SelectDefault": true, + "SelectDir": true, + "SelectRecv": true, + "SelectSend": true, + "SendDir": true, + "Slice": true, + "SliceHeader": true, + "SliceOf": true, + "String": true, + "StringHeader": true, + "Struct": true, + "StructField": true, + "StructOf": true, + "StructTag": true, + "Swapper": true, + "Type": true, + "TypeOf": true, + "Uint": true, + "Uint16": true, + "Uint32": true, + "Uint64": true, + "Uint8": true, + "Uintptr": true, + "UnsafePointer": true, + "Value": true, + "ValueError": true, + "ValueOf": true, + "Zero": true, + }, + "regexp": map[string]bool{ + "Compile": true, + "CompilePOSIX": true, + "Match": true, + "MatchReader": true, + "MatchString": true, + 
"MustCompile": true, + "MustCompilePOSIX": true, + "QuoteMeta": true, + "Regexp": true, + }, + "regexp/syntax": map[string]bool{ + "ClassNL": true, + "Compile": true, + "DotNL": true, + "EmptyBeginLine": true, + "EmptyBeginText": true, + "EmptyEndLine": true, + "EmptyEndText": true, + "EmptyNoWordBoundary": true, + "EmptyOp": true, + "EmptyOpContext": true, + "EmptyWordBoundary": true, + "ErrInternalError": true, + "ErrInvalidCharClass": true, + "ErrInvalidCharRange": true, + "ErrInvalidEscape": true, + "ErrInvalidNamedCapture": true, + "ErrInvalidPerlOp": true, + "ErrInvalidRepeatOp": true, + "ErrInvalidRepeatSize": true, + "ErrInvalidUTF8": true, + "ErrMissingBracket": true, + "ErrMissingParen": true, + "ErrMissingRepeatArgument": true, + "ErrTrailingBackslash": true, + "ErrUnexpectedParen": true, + "Error": true, + "ErrorCode": true, + "Flags": true, + "FoldCase": true, + "Inst": true, + "InstAlt": true, + "InstAltMatch": true, + "InstCapture": true, + "InstEmptyWidth": true, + "InstFail": true, + "InstMatch": true, + "InstNop": true, + "InstOp": true, + "InstRune": true, + "InstRune1": true, + "InstRuneAny": true, + "InstRuneAnyNotNL": true, + "IsWordChar": true, + "Literal": true, + "MatchNL": true, + "NonGreedy": true, + "OneLine": true, + "Op": true, + "OpAlternate": true, + "OpAnyChar": true, + "OpAnyCharNotNL": true, + "OpBeginLine": true, + "OpBeginText": true, + "OpCapture": true, + "OpCharClass": true, + "OpConcat": true, + "OpEmptyMatch": true, + "OpEndLine": true, + "OpEndText": true, + "OpLiteral": true, + "OpNoMatch": true, + "OpNoWordBoundary": true, + "OpPlus": true, + "OpQuest": true, + "OpRepeat": true, + "OpStar": true, + "OpWordBoundary": true, + "POSIX": true, + "Parse": true, + "Perl": true, + "PerlX": true, + "Prog": true, + "Regexp": true, + "Simple": true, + "UnicodeGroups": true, + "WasDollar": true, + }, + "runtime": map[string]bool{ + "BlockProfile": true, + "BlockProfileRecord": true, + "Breakpoint": true, + "CPUProfile": true, + "Caller": true, + "Callers": true, + "CallersFrames": true, + "Compiler": true, + "Error": true, + "Frame": true, + "Frames": true, + "Func": true, + "FuncForPC": true, + "GC": true, + "GOARCH": true, + "GOMAXPROCS": true, + "GOOS": true, + "GOROOT": true, + "Goexit": true, + "GoroutineProfile": true, + "Gosched": true, + "KeepAlive": true, + "LockOSThread": true, + "MemProfile": true, + "MemProfileRate": true, + "MemProfileRecord": true, + "MemStats": true, + "MutexProfile": true, + "NumCPU": true, + "NumCgoCall": true, + "NumGoroutine": true, + "ReadMemStats": true, + "ReadTrace": true, + "SetBlockProfileRate": true, + "SetCPUProfileRate": true, + "SetCgoTraceback": true, + "SetFinalizer": true, + "SetMutexProfileFraction": true, + "Stack": true, + "StackRecord": true, + "StartTrace": true, + "StopTrace": true, + "ThreadCreateProfile": true, + "TypeAssertionError": true, + "UnlockOSThread": true, + "Version": true, + }, + "runtime/debug": map[string]bool{ + "BuildInfo": true, + "FreeOSMemory": true, + "GCStats": true, + "Module": true, + "PrintStack": true, + "ReadBuildInfo": true, + "ReadGCStats": true, + "SetGCPercent": true, + "SetMaxStack": true, + "SetMaxThreads": true, + "SetPanicOnFault": true, + "SetTraceback": true, + "Stack": true, + "WriteHeapDump": true, + }, + "runtime/pprof": map[string]bool{ + "Do": true, + "ForLabels": true, + "Label": true, + "LabelSet": true, + "Labels": true, + "Lookup": true, + "NewProfile": true, + "Profile": true, + "Profiles": true, + "SetGoroutineLabels": true, + "StartCPUProfile": true, + 
"StopCPUProfile": true, + "WithLabels": true, + "WriteHeapProfile": true, + }, + "runtime/trace": map[string]bool{ + "IsEnabled": true, + "Log": true, + "Logf": true, + "NewTask": true, + "Region": true, + "Start": true, + "StartRegion": true, + "Stop": true, + "Task": true, + "WithRegion": true, + }, + "sort": map[string]bool{ + "Float64Slice": true, + "Float64s": true, + "Float64sAreSorted": true, + "IntSlice": true, + "Interface": true, + "Ints": true, + "IntsAreSorted": true, + "IsSorted": true, + "Reverse": true, + "Search": true, + "SearchFloat64s": true, + "SearchInts": true, + "SearchStrings": true, + "Slice": true, + "SliceIsSorted": true, + "SliceStable": true, + "Sort": true, + "Stable": true, + "StringSlice": true, + "Strings": true, + "StringsAreSorted": true, + }, + "strconv": map[string]bool{ + "AppendBool": true, + "AppendFloat": true, + "AppendInt": true, + "AppendQuote": true, + "AppendQuoteRune": true, + "AppendQuoteRuneToASCII": true, + "AppendQuoteRuneToGraphic": true, + "AppendQuoteToASCII": true, + "AppendQuoteToGraphic": true, + "AppendUint": true, + "Atoi": true, + "CanBackquote": true, + "ErrRange": true, + "ErrSyntax": true, + "FormatBool": true, + "FormatFloat": true, + "FormatInt": true, + "FormatUint": true, + "IntSize": true, + "IsGraphic": true, + "IsPrint": true, + "Itoa": true, + "NumError": true, + "ParseBool": true, + "ParseFloat": true, + "ParseInt": true, + "ParseUint": true, + "Quote": true, + "QuoteRune": true, + "QuoteRuneToASCII": true, + "QuoteRuneToGraphic": true, + "QuoteToASCII": true, + "QuoteToGraphic": true, + "Unquote": true, + "UnquoteChar": true, + }, + "strings": map[string]bool{ + "Builder": true, + "Compare": true, + "Contains": true, + "ContainsAny": true, + "ContainsRune": true, + "Count": true, + "EqualFold": true, + "Fields": true, + "FieldsFunc": true, + "HasPrefix": true, + "HasSuffix": true, + "Index": true, + "IndexAny": true, + "IndexByte": true, + "IndexFunc": true, + "IndexRune": true, + "Join": true, + "LastIndex": true, + "LastIndexAny": true, + "LastIndexByte": true, + "LastIndexFunc": true, + "Map": true, + "NewReader": true, + "NewReplacer": true, + "Reader": true, + "Repeat": true, + "Replace": true, + "ReplaceAll": true, + "Replacer": true, + "Split": true, + "SplitAfter": true, + "SplitAfterN": true, + "SplitN": true, + "Title": true, + "ToLower": true, + "ToLowerSpecial": true, + "ToTitle": true, + "ToTitleSpecial": true, + "ToUpper": true, + "ToUpperSpecial": true, + "ToValidUTF8": true, + "Trim": true, + "TrimFunc": true, + "TrimLeft": true, + "TrimLeftFunc": true, + "TrimPrefix": true, + "TrimRight": true, + "TrimRightFunc": true, + "TrimSpace": true, + "TrimSuffix": true, + }, + "sync": map[string]bool{ + "Cond": true, + "Locker": true, + "Map": true, + "Mutex": true, + "NewCond": true, + "Once": true, + "Pool": true, + "RWMutex": true, + "WaitGroup": true, + }, + "sync/atomic": map[string]bool{ + "AddInt32": true, + "AddInt64": true, + "AddUint32": true, + "AddUint64": true, + "AddUintptr": true, + "CompareAndSwapInt32": true, + "CompareAndSwapInt64": true, + "CompareAndSwapPointer": true, + "CompareAndSwapUint32": true, + "CompareAndSwapUint64": true, + "CompareAndSwapUintptr": true, + "LoadInt32": true, + "LoadInt64": true, + "LoadPointer": true, + "LoadUint32": true, + "LoadUint64": true, + "LoadUintptr": true, + "StoreInt32": true, + "StoreInt64": true, + "StorePointer": true, + "StoreUint32": true, + "StoreUint64": true, + "StoreUintptr": true, + "SwapInt32": true, + "SwapInt64": true, + "SwapPointer": 
true, + "SwapUint32": true, + "SwapUint64": true, + "SwapUintptr": true, + "Value": true, + }, + "syscall": map[string]bool{ + "AF_ALG": true, + "AF_APPLETALK": true, + "AF_ARP": true, + "AF_ASH": true, + "AF_ATM": true, + "AF_ATMPVC": true, + "AF_ATMSVC": true, + "AF_AX25": true, + "AF_BLUETOOTH": true, + "AF_BRIDGE": true, + "AF_CAIF": true, + "AF_CAN": true, + "AF_CCITT": true, + "AF_CHAOS": true, + "AF_CNT": true, + "AF_COIP": true, + "AF_DATAKIT": true, + "AF_DECnet": true, + "AF_DLI": true, + "AF_E164": true, + "AF_ECMA": true, + "AF_ECONET": true, + "AF_ENCAP": true, + "AF_FILE": true, + "AF_HYLINK": true, + "AF_IEEE80211": true, + "AF_IEEE802154": true, + "AF_IMPLINK": true, + "AF_INET": true, + "AF_INET6": true, + "AF_INET6_SDP": true, + "AF_INET_SDP": true, + "AF_IPX": true, + "AF_IRDA": true, + "AF_ISDN": true, + "AF_ISO": true, + "AF_IUCV": true, + "AF_KEY": true, + "AF_LAT": true, + "AF_LINK": true, + "AF_LLC": true, + "AF_LOCAL": true, + "AF_MAX": true, + "AF_MPLS": true, + "AF_NATM": true, + "AF_NDRV": true, + "AF_NETBEUI": true, + "AF_NETBIOS": true, + "AF_NETGRAPH": true, + "AF_NETLINK": true, + "AF_NETROM": true, + "AF_NS": true, + "AF_OROUTE": true, + "AF_OSI": true, + "AF_PACKET": true, + "AF_PHONET": true, + "AF_PPP": true, + "AF_PPPOX": true, + "AF_PUP": true, + "AF_RDS": true, + "AF_RESERVED_36": true, + "AF_ROSE": true, + "AF_ROUTE": true, + "AF_RXRPC": true, + "AF_SCLUSTER": true, + "AF_SECURITY": true, + "AF_SIP": true, + "AF_SLOW": true, + "AF_SNA": true, + "AF_SYSTEM": true, + "AF_TIPC": true, + "AF_UNIX": true, + "AF_UNSPEC": true, + "AF_VENDOR00": true, + "AF_VENDOR01": true, + "AF_VENDOR02": true, + "AF_VENDOR03": true, + "AF_VENDOR04": true, + "AF_VENDOR05": true, + "AF_VENDOR06": true, + "AF_VENDOR07": true, + "AF_VENDOR08": true, + "AF_VENDOR09": true, + "AF_VENDOR10": true, + "AF_VENDOR11": true, + "AF_VENDOR12": true, + "AF_VENDOR13": true, + "AF_VENDOR14": true, + "AF_VENDOR15": true, + "AF_VENDOR16": true, + "AF_VENDOR17": true, + "AF_VENDOR18": true, + "AF_VENDOR19": true, + "AF_VENDOR20": true, + "AF_VENDOR21": true, + "AF_VENDOR22": true, + "AF_VENDOR23": true, + "AF_VENDOR24": true, + "AF_VENDOR25": true, + "AF_VENDOR26": true, + "AF_VENDOR27": true, + "AF_VENDOR28": true, + "AF_VENDOR29": true, + "AF_VENDOR30": true, + "AF_VENDOR31": true, + "AF_VENDOR32": true, + "AF_VENDOR33": true, + "AF_VENDOR34": true, + "AF_VENDOR35": true, + "AF_VENDOR36": true, + "AF_VENDOR37": true, + "AF_VENDOR38": true, + "AF_VENDOR39": true, + "AF_VENDOR40": true, + "AF_VENDOR41": true, + "AF_VENDOR42": true, + "AF_VENDOR43": true, + "AF_VENDOR44": true, + "AF_VENDOR45": true, + "AF_VENDOR46": true, + "AF_VENDOR47": true, + "AF_WANPIPE": true, + "AF_X25": true, + "AI_CANONNAME": true, + "AI_NUMERICHOST": true, + "AI_PASSIVE": true, + "APPLICATION_ERROR": true, + "ARPHRD_ADAPT": true, + "ARPHRD_APPLETLK": true, + "ARPHRD_ARCNET": true, + "ARPHRD_ASH": true, + "ARPHRD_ATM": true, + "ARPHRD_AX25": true, + "ARPHRD_BIF": true, + "ARPHRD_CHAOS": true, + "ARPHRD_CISCO": true, + "ARPHRD_CSLIP": true, + "ARPHRD_CSLIP6": true, + "ARPHRD_DDCMP": true, + "ARPHRD_DLCI": true, + "ARPHRD_ECONET": true, + "ARPHRD_EETHER": true, + "ARPHRD_ETHER": true, + "ARPHRD_EUI64": true, + "ARPHRD_FCAL": true, + "ARPHRD_FCFABRIC": true, + "ARPHRD_FCPL": true, + "ARPHRD_FCPP": true, + "ARPHRD_FDDI": true, + "ARPHRD_FRAD": true, + "ARPHRD_FRELAY": true, + "ARPHRD_HDLC": true, + "ARPHRD_HIPPI": true, + "ARPHRD_HWX25": true, + "ARPHRD_IEEE1394": true, + "ARPHRD_IEEE802": true, + "ARPHRD_IEEE80211": 
true, + "ARPHRD_IEEE80211_PRISM": true, + "ARPHRD_IEEE80211_RADIOTAP": true, + "ARPHRD_IEEE802154": true, + "ARPHRD_IEEE802154_PHY": true, + "ARPHRD_IEEE802_TR": true, + "ARPHRD_INFINIBAND": true, + "ARPHRD_IPDDP": true, + "ARPHRD_IPGRE": true, + "ARPHRD_IRDA": true, + "ARPHRD_LAPB": true, + "ARPHRD_LOCALTLK": true, + "ARPHRD_LOOPBACK": true, + "ARPHRD_METRICOM": true, + "ARPHRD_NETROM": true, + "ARPHRD_NONE": true, + "ARPHRD_PIMREG": true, + "ARPHRD_PPP": true, + "ARPHRD_PRONET": true, + "ARPHRD_RAWHDLC": true, + "ARPHRD_ROSE": true, + "ARPHRD_RSRVD": true, + "ARPHRD_SIT": true, + "ARPHRD_SKIP": true, + "ARPHRD_SLIP": true, + "ARPHRD_SLIP6": true, + "ARPHRD_STRIP": true, + "ARPHRD_TUNNEL": true, + "ARPHRD_TUNNEL6": true, + "ARPHRD_VOID": true, + "ARPHRD_X25": true, + "AUTHTYPE_CLIENT": true, + "AUTHTYPE_SERVER": true, + "Accept": true, + "Accept4": true, + "AcceptEx": true, + "Access": true, + "Acct": true, + "AddrinfoW": true, + "Adjtime": true, + "Adjtimex": true, + "AttachLsf": true, + "B0": true, + "B1000000": true, + "B110": true, + "B115200": true, + "B1152000": true, + "B1200": true, + "B134": true, + "B14400": true, + "B150": true, + "B1500000": true, + "B1800": true, + "B19200": true, + "B200": true, + "B2000000": true, + "B230400": true, + "B2400": true, + "B2500000": true, + "B28800": true, + "B300": true, + "B3000000": true, + "B3500000": true, + "B38400": true, + "B4000000": true, + "B460800": true, + "B4800": true, + "B50": true, + "B500000": true, + "B57600": true, + "B576000": true, + "B600": true, + "B7200": true, + "B75": true, + "B76800": true, + "B921600": true, + "B9600": true, + "BASE_PROTOCOL": true, + "BIOCFEEDBACK": true, + "BIOCFLUSH": true, + "BIOCGBLEN": true, + "BIOCGDIRECTION": true, + "BIOCGDIRFILT": true, + "BIOCGDLT": true, + "BIOCGDLTLIST": true, + "BIOCGETBUFMODE": true, + "BIOCGETIF": true, + "BIOCGETZMAX": true, + "BIOCGFEEDBACK": true, + "BIOCGFILDROP": true, + "BIOCGHDRCMPLT": true, + "BIOCGRSIG": true, + "BIOCGRTIMEOUT": true, + "BIOCGSEESENT": true, + "BIOCGSTATS": true, + "BIOCGSTATSOLD": true, + "BIOCGTSTAMP": true, + "BIOCIMMEDIATE": true, + "BIOCLOCK": true, + "BIOCPROMISC": true, + "BIOCROTZBUF": true, + "BIOCSBLEN": true, + "BIOCSDIRECTION": true, + "BIOCSDIRFILT": true, + "BIOCSDLT": true, + "BIOCSETBUFMODE": true, + "BIOCSETF": true, + "BIOCSETFNR": true, + "BIOCSETIF": true, + "BIOCSETWF": true, + "BIOCSETZBUF": true, + "BIOCSFEEDBACK": true, + "BIOCSFILDROP": true, + "BIOCSHDRCMPLT": true, + "BIOCSRSIG": true, + "BIOCSRTIMEOUT": true, + "BIOCSSEESENT": true, + "BIOCSTCPF": true, + "BIOCSTSTAMP": true, + "BIOCSUDPF": true, + "BIOCVERSION": true, + "BPF_A": true, + "BPF_ABS": true, + "BPF_ADD": true, + "BPF_ALIGNMENT": true, + "BPF_ALIGNMENT32": true, + "BPF_ALU": true, + "BPF_AND": true, + "BPF_B": true, + "BPF_BUFMODE_BUFFER": true, + "BPF_BUFMODE_ZBUF": true, + "BPF_DFLTBUFSIZE": true, + "BPF_DIRECTION_IN": true, + "BPF_DIRECTION_OUT": true, + "BPF_DIV": true, + "BPF_H": true, + "BPF_IMM": true, + "BPF_IND": true, + "BPF_JA": true, + "BPF_JEQ": true, + "BPF_JGE": true, + "BPF_JGT": true, + "BPF_JMP": true, + "BPF_JSET": true, + "BPF_K": true, + "BPF_LD": true, + "BPF_LDX": true, + "BPF_LEN": true, + "BPF_LSH": true, + "BPF_MAJOR_VERSION": true, + "BPF_MAXBUFSIZE": true, + "BPF_MAXINSNS": true, + "BPF_MEM": true, + "BPF_MEMWORDS": true, + "BPF_MINBUFSIZE": true, + "BPF_MINOR_VERSION": true, + "BPF_MISC": true, + "BPF_MSH": true, + "BPF_MUL": true, + "BPF_NEG": true, + "BPF_OR": true, + "BPF_RELEASE": true, + "BPF_RET": true, + "BPF_RSH": 
true, + "BPF_ST": true, + "BPF_STX": true, + "BPF_SUB": true, + "BPF_TAX": true, + "BPF_TXA": true, + "BPF_T_BINTIME": true, + "BPF_T_BINTIME_FAST": true, + "BPF_T_BINTIME_MONOTONIC": true, + "BPF_T_BINTIME_MONOTONIC_FAST": true, + "BPF_T_FAST": true, + "BPF_T_FLAG_MASK": true, + "BPF_T_FORMAT_MASK": true, + "BPF_T_MICROTIME": true, + "BPF_T_MICROTIME_FAST": true, + "BPF_T_MICROTIME_MONOTONIC": true, + "BPF_T_MICROTIME_MONOTONIC_FAST": true, + "BPF_T_MONOTONIC": true, + "BPF_T_MONOTONIC_FAST": true, + "BPF_T_NANOTIME": true, + "BPF_T_NANOTIME_FAST": true, + "BPF_T_NANOTIME_MONOTONIC": true, + "BPF_T_NANOTIME_MONOTONIC_FAST": true, + "BPF_T_NONE": true, + "BPF_T_NORMAL": true, + "BPF_W": true, + "BPF_X": true, + "BRKINT": true, + "Bind": true, + "BindToDevice": true, + "BpfBuflen": true, + "BpfDatalink": true, + "BpfHdr": true, + "BpfHeadercmpl": true, + "BpfInsn": true, + "BpfInterface": true, + "BpfJump": true, + "BpfProgram": true, + "BpfStat": true, + "BpfStats": true, + "BpfStmt": true, + "BpfTimeout": true, + "BpfTimeval": true, + "BpfVersion": true, + "BpfZbuf": true, + "BpfZbufHeader": true, + "ByHandleFileInformation": true, + "BytePtrFromString": true, + "ByteSliceFromString": true, + "CCR0_FLUSH": true, + "CERT_CHAIN_POLICY_AUTHENTICODE": true, + "CERT_CHAIN_POLICY_AUTHENTICODE_TS": true, + "CERT_CHAIN_POLICY_BASE": true, + "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS": true, + "CERT_CHAIN_POLICY_EV": true, + "CERT_CHAIN_POLICY_MICROSOFT_ROOT": true, + "CERT_CHAIN_POLICY_NT_AUTH": true, + "CERT_CHAIN_POLICY_SSL": true, + "CERT_E_CN_NO_MATCH": true, + "CERT_E_EXPIRED": true, + "CERT_E_PURPOSE": true, + "CERT_E_ROLE": true, + "CERT_E_UNTRUSTEDROOT": true, + "CERT_STORE_ADD_ALWAYS": true, + "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG": true, + "CERT_STORE_PROV_MEMORY": true, + "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT": true, + "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT": true, + "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT": true, + "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT": true, + "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT": true, + "CERT_TRUST_INVALID_BASIC_CONSTRAINTS": true, + "CERT_TRUST_INVALID_EXTENSION": true, + "CERT_TRUST_INVALID_NAME_CONSTRAINTS": true, + "CERT_TRUST_INVALID_POLICY_CONSTRAINTS": true, + "CERT_TRUST_IS_CYCLIC": true, + "CERT_TRUST_IS_EXPLICIT_DISTRUST": true, + "CERT_TRUST_IS_NOT_SIGNATURE_VALID": true, + "CERT_TRUST_IS_NOT_TIME_VALID": true, + "CERT_TRUST_IS_NOT_VALID_FOR_USAGE": true, + "CERT_TRUST_IS_OFFLINE_REVOCATION": true, + "CERT_TRUST_IS_REVOKED": true, + "CERT_TRUST_IS_UNTRUSTED_ROOT": true, + "CERT_TRUST_NO_ERROR": true, + "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY": true, + "CERT_TRUST_REVOCATION_STATUS_UNKNOWN": true, + "CFLUSH": true, + "CLOCAL": true, + "CLONE_CHILD_CLEARTID": true, + "CLONE_CHILD_SETTID": true, + "CLONE_CSIGNAL": true, + "CLONE_DETACHED": true, + "CLONE_FILES": true, + "CLONE_FS": true, + "CLONE_IO": true, + "CLONE_NEWIPC": true, + "CLONE_NEWNET": true, + "CLONE_NEWNS": true, + "CLONE_NEWPID": true, + "CLONE_NEWUSER": true, + "CLONE_NEWUTS": true, + "CLONE_PARENT": true, + "CLONE_PARENT_SETTID": true, + "CLONE_PID": true, + "CLONE_PTRACE": true, + "CLONE_SETTLS": true, + "CLONE_SIGHAND": true, + "CLONE_SYSVSEM": true, + "CLONE_THREAD": true, + "CLONE_UNTRACED": true, + "CLONE_VFORK": true, + "CLONE_VM": true, + "CPUID_CFLUSH": true, + "CREAD": true, + "CREATE_ALWAYS": true, + "CREATE_NEW": true, + "CREATE_NEW_PROCESS_GROUP": true, + "CREATE_UNICODE_ENVIRONMENT": true, + "CRYPT_DEFAULT_CONTAINER_OPTIONAL": true, + 
"CRYPT_DELETEKEYSET": true, + "CRYPT_MACHINE_KEYSET": true, + "CRYPT_NEWKEYSET": true, + "CRYPT_SILENT": true, + "CRYPT_VERIFYCONTEXT": true, + "CS5": true, + "CS6": true, + "CS7": true, + "CS8": true, + "CSIZE": true, + "CSTART": true, + "CSTATUS": true, + "CSTOP": true, + "CSTOPB": true, + "CSUSP": true, + "CTL_MAXNAME": true, + "CTL_NET": true, + "CTL_QUERY": true, + "CTRL_BREAK_EVENT": true, + "CTRL_C_EVENT": true, + "CancelIo": true, + "CancelIoEx": true, + "CertAddCertificateContextToStore": true, + "CertChainContext": true, + "CertChainElement": true, + "CertChainPara": true, + "CertChainPolicyPara": true, + "CertChainPolicyStatus": true, + "CertCloseStore": true, + "CertContext": true, + "CertCreateCertificateContext": true, + "CertEnhKeyUsage": true, + "CertEnumCertificatesInStore": true, + "CertFreeCertificateChain": true, + "CertFreeCertificateContext": true, + "CertGetCertificateChain": true, + "CertInfo": true, + "CertOpenStore": true, + "CertOpenSystemStore": true, + "CertRevocationCrlInfo": true, + "CertRevocationInfo": true, + "CertSimpleChain": true, + "CertTrustListInfo": true, + "CertTrustStatus": true, + "CertUsageMatch": true, + "CertVerifyCertificateChainPolicy": true, + "Chdir": true, + "CheckBpfVersion": true, + "Chflags": true, + "Chmod": true, + "Chown": true, + "Chroot": true, + "Clearenv": true, + "Close": true, + "CloseHandle": true, + "CloseOnExec": true, + "Closesocket": true, + "CmsgLen": true, + "CmsgSpace": true, + "Cmsghdr": true, + "CommandLineToArgv": true, + "ComputerName": true, + "Conn": true, + "Connect": true, + "ConnectEx": true, + "ConvertSidToStringSid": true, + "ConvertStringSidToSid": true, + "CopySid": true, + "Creat": true, + "CreateDirectory": true, + "CreateFile": true, + "CreateFileMapping": true, + "CreateHardLink": true, + "CreateIoCompletionPort": true, + "CreatePipe": true, + "CreateProcess": true, + "CreateProcessAsUser": true, + "CreateSymbolicLink": true, + "CreateToolhelp32Snapshot": true, + "Credential": true, + "CryptAcquireContext": true, + "CryptGenRandom": true, + "CryptReleaseContext": true, + "DIOCBSFLUSH": true, + "DIOCOSFPFLUSH": true, + "DLL": true, + "DLLError": true, + "DLT_A429": true, + "DLT_A653_ICM": true, + "DLT_AIRONET_HEADER": true, + "DLT_AOS": true, + "DLT_APPLE_IP_OVER_IEEE1394": true, + "DLT_ARCNET": true, + "DLT_ARCNET_LINUX": true, + "DLT_ATM_CLIP": true, + "DLT_ATM_RFC1483": true, + "DLT_AURORA": true, + "DLT_AX25": true, + "DLT_AX25_KISS": true, + "DLT_BACNET_MS_TP": true, + "DLT_BLUETOOTH_HCI_H4": true, + "DLT_BLUETOOTH_HCI_H4_WITH_PHDR": true, + "DLT_CAN20B": true, + "DLT_CAN_SOCKETCAN": true, + "DLT_CHAOS": true, + "DLT_CHDLC": true, + "DLT_CISCO_IOS": true, + "DLT_C_HDLC": true, + "DLT_C_HDLC_WITH_DIR": true, + "DLT_DBUS": true, + "DLT_DECT": true, + "DLT_DOCSIS": true, + "DLT_DVB_CI": true, + "DLT_ECONET": true, + "DLT_EN10MB": true, + "DLT_EN3MB": true, + "DLT_ENC": true, + "DLT_ERF": true, + "DLT_ERF_ETH": true, + "DLT_ERF_POS": true, + "DLT_FC_2": true, + "DLT_FC_2_WITH_FRAME_DELIMS": true, + "DLT_FDDI": true, + "DLT_FLEXRAY": true, + "DLT_FRELAY": true, + "DLT_FRELAY_WITH_DIR": true, + "DLT_GCOM_SERIAL": true, + "DLT_GCOM_T1E1": true, + "DLT_GPF_F": true, + "DLT_GPF_T": true, + "DLT_GPRS_LLC": true, + "DLT_GSMTAP_ABIS": true, + "DLT_GSMTAP_UM": true, + "DLT_HDLC": true, + "DLT_HHDLC": true, + "DLT_HIPPI": true, + "DLT_IBM_SN": true, + "DLT_IBM_SP": true, + "DLT_IEEE802": true, + "DLT_IEEE802_11": true, + "DLT_IEEE802_11_RADIO": true, + "DLT_IEEE802_11_RADIO_AVS": true, + "DLT_IEEE802_15_4": 
true, + "DLT_IEEE802_15_4_LINUX": true, + "DLT_IEEE802_15_4_NOFCS": true, + "DLT_IEEE802_15_4_NONASK_PHY": true, + "DLT_IEEE802_16_MAC_CPS": true, + "DLT_IEEE802_16_MAC_CPS_RADIO": true, + "DLT_IPFILTER": true, + "DLT_IPMB": true, + "DLT_IPMB_LINUX": true, + "DLT_IPNET": true, + "DLT_IPOIB": true, + "DLT_IPV4": true, + "DLT_IPV6": true, + "DLT_IP_OVER_FC": true, + "DLT_JUNIPER_ATM1": true, + "DLT_JUNIPER_ATM2": true, + "DLT_JUNIPER_ATM_CEMIC": true, + "DLT_JUNIPER_CHDLC": true, + "DLT_JUNIPER_ES": true, + "DLT_JUNIPER_ETHER": true, + "DLT_JUNIPER_FIBRECHANNEL": true, + "DLT_JUNIPER_FRELAY": true, + "DLT_JUNIPER_GGSN": true, + "DLT_JUNIPER_ISM": true, + "DLT_JUNIPER_MFR": true, + "DLT_JUNIPER_MLFR": true, + "DLT_JUNIPER_MLPPP": true, + "DLT_JUNIPER_MONITOR": true, + "DLT_JUNIPER_PIC_PEER": true, + "DLT_JUNIPER_PPP": true, + "DLT_JUNIPER_PPPOE": true, + "DLT_JUNIPER_PPPOE_ATM": true, + "DLT_JUNIPER_SERVICES": true, + "DLT_JUNIPER_SRX_E2E": true, + "DLT_JUNIPER_ST": true, + "DLT_JUNIPER_VP": true, + "DLT_JUNIPER_VS": true, + "DLT_LAPB_WITH_DIR": true, + "DLT_LAPD": true, + "DLT_LIN": true, + "DLT_LINUX_EVDEV": true, + "DLT_LINUX_IRDA": true, + "DLT_LINUX_LAPD": true, + "DLT_LINUX_PPP_WITHDIRECTION": true, + "DLT_LINUX_SLL": true, + "DLT_LOOP": true, + "DLT_LTALK": true, + "DLT_MATCHING_MAX": true, + "DLT_MATCHING_MIN": true, + "DLT_MFR": true, + "DLT_MOST": true, + "DLT_MPEG_2_TS": true, + "DLT_MPLS": true, + "DLT_MTP2": true, + "DLT_MTP2_WITH_PHDR": true, + "DLT_MTP3": true, + "DLT_MUX27010": true, + "DLT_NETANALYZER": true, + "DLT_NETANALYZER_TRANSPARENT": true, + "DLT_NFC_LLCP": true, + "DLT_NFLOG": true, + "DLT_NG40": true, + "DLT_NULL": true, + "DLT_PCI_EXP": true, + "DLT_PFLOG": true, + "DLT_PFSYNC": true, + "DLT_PPI": true, + "DLT_PPP": true, + "DLT_PPP_BSDOS": true, + "DLT_PPP_ETHER": true, + "DLT_PPP_PPPD": true, + "DLT_PPP_SERIAL": true, + "DLT_PPP_WITH_DIR": true, + "DLT_PPP_WITH_DIRECTION": true, + "DLT_PRISM_HEADER": true, + "DLT_PRONET": true, + "DLT_RAIF1": true, + "DLT_RAW": true, + "DLT_RAWAF_MASK": true, + "DLT_RIO": true, + "DLT_SCCP": true, + "DLT_SITA": true, + "DLT_SLIP": true, + "DLT_SLIP_BSDOS": true, + "DLT_STANAG_5066_D_PDU": true, + "DLT_SUNATM": true, + "DLT_SYMANTEC_FIREWALL": true, + "DLT_TZSP": true, + "DLT_USB": true, + "DLT_USB_LINUX": true, + "DLT_USB_LINUX_MMAPPED": true, + "DLT_USER0": true, + "DLT_USER1": true, + "DLT_USER10": true, + "DLT_USER11": true, + "DLT_USER12": true, + "DLT_USER13": true, + "DLT_USER14": true, + "DLT_USER15": true, + "DLT_USER2": true, + "DLT_USER3": true, + "DLT_USER4": true, + "DLT_USER5": true, + "DLT_USER6": true, + "DLT_USER7": true, + "DLT_USER8": true, + "DLT_USER9": true, + "DLT_WIHART": true, + "DLT_X2E_SERIAL": true, + "DLT_X2E_XORAYA": true, + "DNSMXData": true, + "DNSPTRData": true, + "DNSRecord": true, + "DNSSRVData": true, + "DNSTXTData": true, + "DNS_INFO_NO_RECORDS": true, + "DNS_TYPE_A": true, + "DNS_TYPE_A6": true, + "DNS_TYPE_AAAA": true, + "DNS_TYPE_ADDRS": true, + "DNS_TYPE_AFSDB": true, + "DNS_TYPE_ALL": true, + "DNS_TYPE_ANY": true, + "DNS_TYPE_ATMA": true, + "DNS_TYPE_AXFR": true, + "DNS_TYPE_CERT": true, + "DNS_TYPE_CNAME": true, + "DNS_TYPE_DHCID": true, + "DNS_TYPE_DNAME": true, + "DNS_TYPE_DNSKEY": true, + "DNS_TYPE_DS": true, + "DNS_TYPE_EID": true, + "DNS_TYPE_GID": true, + "DNS_TYPE_GPOS": true, + "DNS_TYPE_HINFO": true, + "DNS_TYPE_ISDN": true, + "DNS_TYPE_IXFR": true, + "DNS_TYPE_KEY": true, + "DNS_TYPE_KX": true, + "DNS_TYPE_LOC": true, + "DNS_TYPE_MAILA": true, + "DNS_TYPE_MAILB": true, + 
"DNS_TYPE_MB": true, + "DNS_TYPE_MD": true, + "DNS_TYPE_MF": true, + "DNS_TYPE_MG": true, + "DNS_TYPE_MINFO": true, + "DNS_TYPE_MR": true, + "DNS_TYPE_MX": true, + "DNS_TYPE_NAPTR": true, + "DNS_TYPE_NBSTAT": true, + "DNS_TYPE_NIMLOC": true, + "DNS_TYPE_NS": true, + "DNS_TYPE_NSAP": true, + "DNS_TYPE_NSAPPTR": true, + "DNS_TYPE_NSEC": true, + "DNS_TYPE_NULL": true, + "DNS_TYPE_NXT": true, + "DNS_TYPE_OPT": true, + "DNS_TYPE_PTR": true, + "DNS_TYPE_PX": true, + "DNS_TYPE_RP": true, + "DNS_TYPE_RRSIG": true, + "DNS_TYPE_RT": true, + "DNS_TYPE_SIG": true, + "DNS_TYPE_SINK": true, + "DNS_TYPE_SOA": true, + "DNS_TYPE_SRV": true, + "DNS_TYPE_TEXT": true, + "DNS_TYPE_TKEY": true, + "DNS_TYPE_TSIG": true, + "DNS_TYPE_UID": true, + "DNS_TYPE_UINFO": true, + "DNS_TYPE_UNSPEC": true, + "DNS_TYPE_WINS": true, + "DNS_TYPE_WINSR": true, + "DNS_TYPE_WKS": true, + "DNS_TYPE_X25": true, + "DT_BLK": true, + "DT_CHR": true, + "DT_DIR": true, + "DT_FIFO": true, + "DT_LNK": true, + "DT_REG": true, + "DT_SOCK": true, + "DT_UNKNOWN": true, + "DT_WHT": true, + "DUPLICATE_CLOSE_SOURCE": true, + "DUPLICATE_SAME_ACCESS": true, + "DeleteFile": true, + "DetachLsf": true, + "DeviceIoControl": true, + "Dirent": true, + "DnsNameCompare": true, + "DnsQuery": true, + "DnsRecordListFree": true, + "DnsSectionAdditional": true, + "DnsSectionAnswer": true, + "DnsSectionAuthority": true, + "DnsSectionQuestion": true, + "Dup": true, + "Dup2": true, + "Dup3": true, + "DuplicateHandle": true, + "E2BIG": true, + "EACCES": true, + "EADDRINUSE": true, + "EADDRNOTAVAIL": true, + "EADV": true, + "EAFNOSUPPORT": true, + "EAGAIN": true, + "EALREADY": true, + "EAUTH": true, + "EBADARCH": true, + "EBADE": true, + "EBADEXEC": true, + "EBADF": true, + "EBADFD": true, + "EBADMACHO": true, + "EBADMSG": true, + "EBADR": true, + "EBADRPC": true, + "EBADRQC": true, + "EBADSLT": true, + "EBFONT": true, + "EBUSY": true, + "ECANCELED": true, + "ECAPMODE": true, + "ECHILD": true, + "ECHO": true, + "ECHOCTL": true, + "ECHOE": true, + "ECHOK": true, + "ECHOKE": true, + "ECHONL": true, + "ECHOPRT": true, + "ECHRNG": true, + "ECOMM": true, + "ECONNABORTED": true, + "ECONNREFUSED": true, + "ECONNRESET": true, + "EDEADLK": true, + "EDEADLOCK": true, + "EDESTADDRREQ": true, + "EDEVERR": true, + "EDOM": true, + "EDOOFUS": true, + "EDOTDOT": true, + "EDQUOT": true, + "EEXIST": true, + "EFAULT": true, + "EFBIG": true, + "EFER_LMA": true, + "EFER_LME": true, + "EFER_NXE": true, + "EFER_SCE": true, + "EFTYPE": true, + "EHOSTDOWN": true, + "EHOSTUNREACH": true, + "EHWPOISON": true, + "EIDRM": true, + "EILSEQ": true, + "EINPROGRESS": true, + "EINTR": true, + "EINVAL": true, + "EIO": true, + "EIPSEC": true, + "EISCONN": true, + "EISDIR": true, + "EISNAM": true, + "EKEYEXPIRED": true, + "EKEYREJECTED": true, + "EKEYREVOKED": true, + "EL2HLT": true, + "EL2NSYNC": true, + "EL3HLT": true, + "EL3RST": true, + "ELAST": true, + "ELF_NGREG": true, + "ELF_PRARGSZ": true, + "ELIBACC": true, + "ELIBBAD": true, + "ELIBEXEC": true, + "ELIBMAX": true, + "ELIBSCN": true, + "ELNRNG": true, + "ELOOP": true, + "EMEDIUMTYPE": true, + "EMFILE": true, + "EMLINK": true, + "EMSGSIZE": true, + "EMT_TAGOVF": true, + "EMULTIHOP": true, + "EMUL_ENABLED": true, + "EMUL_LINUX": true, + "EMUL_LINUX32": true, + "EMUL_MAXID": true, + "EMUL_NATIVE": true, + "ENAMETOOLONG": true, + "ENAVAIL": true, + "ENDRUNDISC": true, + "ENEEDAUTH": true, + "ENETDOWN": true, + "ENETRESET": true, + "ENETUNREACH": true, + "ENFILE": true, + "ENOANO": true, + "ENOATTR": true, + "ENOBUFS": true, + "ENOCSI": true, + 
"ENODATA": true, + "ENODEV": true, + "ENOENT": true, + "ENOEXEC": true, + "ENOKEY": true, + "ENOLCK": true, + "ENOLINK": true, + "ENOMEDIUM": true, + "ENOMEM": true, + "ENOMSG": true, + "ENONET": true, + "ENOPKG": true, + "ENOPOLICY": true, + "ENOPROTOOPT": true, + "ENOSPC": true, + "ENOSR": true, + "ENOSTR": true, + "ENOSYS": true, + "ENOTBLK": true, + "ENOTCAPABLE": true, + "ENOTCONN": true, + "ENOTDIR": true, + "ENOTEMPTY": true, + "ENOTNAM": true, + "ENOTRECOVERABLE": true, + "ENOTSOCK": true, + "ENOTSUP": true, + "ENOTTY": true, + "ENOTUNIQ": true, + "ENXIO": true, + "EN_SW_CTL_INF": true, + "EN_SW_CTL_PREC": true, + "EN_SW_CTL_ROUND": true, + "EN_SW_DATACHAIN": true, + "EN_SW_DENORM": true, + "EN_SW_INVOP": true, + "EN_SW_OVERFLOW": true, + "EN_SW_PRECLOSS": true, + "EN_SW_UNDERFLOW": true, + "EN_SW_ZERODIV": true, + "EOPNOTSUPP": true, + "EOVERFLOW": true, + "EOWNERDEAD": true, + "EPERM": true, + "EPFNOSUPPORT": true, + "EPIPE": true, + "EPOLLERR": true, + "EPOLLET": true, + "EPOLLHUP": true, + "EPOLLIN": true, + "EPOLLMSG": true, + "EPOLLONESHOT": true, + "EPOLLOUT": true, + "EPOLLPRI": true, + "EPOLLRDBAND": true, + "EPOLLRDHUP": true, + "EPOLLRDNORM": true, + "EPOLLWRBAND": true, + "EPOLLWRNORM": true, + "EPOLL_CLOEXEC": true, + "EPOLL_CTL_ADD": true, + "EPOLL_CTL_DEL": true, + "EPOLL_CTL_MOD": true, + "EPOLL_NONBLOCK": true, + "EPROCLIM": true, + "EPROCUNAVAIL": true, + "EPROGMISMATCH": true, + "EPROGUNAVAIL": true, + "EPROTO": true, + "EPROTONOSUPPORT": true, + "EPROTOTYPE": true, + "EPWROFF": true, + "ERANGE": true, + "EREMCHG": true, + "EREMOTE": true, + "EREMOTEIO": true, + "ERESTART": true, + "ERFKILL": true, + "EROFS": true, + "ERPCMISMATCH": true, + "ERROR_ACCESS_DENIED": true, + "ERROR_ALREADY_EXISTS": true, + "ERROR_BROKEN_PIPE": true, + "ERROR_BUFFER_OVERFLOW": true, + "ERROR_DIR_NOT_EMPTY": true, + "ERROR_ENVVAR_NOT_FOUND": true, + "ERROR_FILE_EXISTS": true, + "ERROR_FILE_NOT_FOUND": true, + "ERROR_HANDLE_EOF": true, + "ERROR_INSUFFICIENT_BUFFER": true, + "ERROR_IO_PENDING": true, + "ERROR_MOD_NOT_FOUND": true, + "ERROR_MORE_DATA": true, + "ERROR_NETNAME_DELETED": true, + "ERROR_NOT_FOUND": true, + "ERROR_NO_MORE_FILES": true, + "ERROR_OPERATION_ABORTED": true, + "ERROR_PATH_NOT_FOUND": true, + "ERROR_PRIVILEGE_NOT_HELD": true, + "ERROR_PROC_NOT_FOUND": true, + "ESHLIBVERS": true, + "ESHUTDOWN": true, + "ESOCKTNOSUPPORT": true, + "ESPIPE": true, + "ESRCH": true, + "ESRMNT": true, + "ESTALE": true, + "ESTRPIPE": true, + "ETHERCAP_JUMBO_MTU": true, + "ETHERCAP_VLAN_HWTAGGING": true, + "ETHERCAP_VLAN_MTU": true, + "ETHERMIN": true, + "ETHERMTU": true, + "ETHERMTU_JUMBO": true, + "ETHERTYPE_8023": true, + "ETHERTYPE_AARP": true, + "ETHERTYPE_ACCTON": true, + "ETHERTYPE_AEONIC": true, + "ETHERTYPE_ALPHA": true, + "ETHERTYPE_AMBER": true, + "ETHERTYPE_AMOEBA": true, + "ETHERTYPE_AOE": true, + "ETHERTYPE_APOLLO": true, + "ETHERTYPE_APOLLODOMAIN": true, + "ETHERTYPE_APPLETALK": true, + "ETHERTYPE_APPLITEK": true, + "ETHERTYPE_ARGONAUT": true, + "ETHERTYPE_ARP": true, + "ETHERTYPE_AT": true, + "ETHERTYPE_ATALK": true, + "ETHERTYPE_ATOMIC": true, + "ETHERTYPE_ATT": true, + "ETHERTYPE_ATTSTANFORD": true, + "ETHERTYPE_AUTOPHON": true, + "ETHERTYPE_AXIS": true, + "ETHERTYPE_BCLOOP": true, + "ETHERTYPE_BOFL": true, + "ETHERTYPE_CABLETRON": true, + "ETHERTYPE_CHAOS": true, + "ETHERTYPE_COMDESIGN": true, + "ETHERTYPE_COMPUGRAPHIC": true, + "ETHERTYPE_COUNTERPOINT": true, + "ETHERTYPE_CRONUS": true, + "ETHERTYPE_CRONUSVLN": true, + "ETHERTYPE_DCA": true, + "ETHERTYPE_DDE": true, + 
"ETHERTYPE_DEBNI": true, + "ETHERTYPE_DECAM": true, + "ETHERTYPE_DECCUST": true, + "ETHERTYPE_DECDIAG": true, + "ETHERTYPE_DECDNS": true, + "ETHERTYPE_DECDTS": true, + "ETHERTYPE_DECEXPER": true, + "ETHERTYPE_DECLAST": true, + "ETHERTYPE_DECLTM": true, + "ETHERTYPE_DECMUMPS": true, + "ETHERTYPE_DECNETBIOS": true, + "ETHERTYPE_DELTACON": true, + "ETHERTYPE_DIDDLE": true, + "ETHERTYPE_DLOG1": true, + "ETHERTYPE_DLOG2": true, + "ETHERTYPE_DN": true, + "ETHERTYPE_DOGFIGHT": true, + "ETHERTYPE_DSMD": true, + "ETHERTYPE_ECMA": true, + "ETHERTYPE_ENCRYPT": true, + "ETHERTYPE_ES": true, + "ETHERTYPE_EXCELAN": true, + "ETHERTYPE_EXPERDATA": true, + "ETHERTYPE_FLIP": true, + "ETHERTYPE_FLOWCONTROL": true, + "ETHERTYPE_FRARP": true, + "ETHERTYPE_GENDYN": true, + "ETHERTYPE_HAYES": true, + "ETHERTYPE_HIPPI_FP": true, + "ETHERTYPE_HITACHI": true, + "ETHERTYPE_HP": true, + "ETHERTYPE_IEEEPUP": true, + "ETHERTYPE_IEEEPUPAT": true, + "ETHERTYPE_IMLBL": true, + "ETHERTYPE_IMLBLDIAG": true, + "ETHERTYPE_IP": true, + "ETHERTYPE_IPAS": true, + "ETHERTYPE_IPV6": true, + "ETHERTYPE_IPX": true, + "ETHERTYPE_IPXNEW": true, + "ETHERTYPE_KALPANA": true, + "ETHERTYPE_LANBRIDGE": true, + "ETHERTYPE_LANPROBE": true, + "ETHERTYPE_LAT": true, + "ETHERTYPE_LBACK": true, + "ETHERTYPE_LITTLE": true, + "ETHERTYPE_LLDP": true, + "ETHERTYPE_LOGICRAFT": true, + "ETHERTYPE_LOOPBACK": true, + "ETHERTYPE_MATRA": true, + "ETHERTYPE_MAX": true, + "ETHERTYPE_MERIT": true, + "ETHERTYPE_MICP": true, + "ETHERTYPE_MOPDL": true, + "ETHERTYPE_MOPRC": true, + "ETHERTYPE_MOTOROLA": true, + "ETHERTYPE_MPLS": true, + "ETHERTYPE_MPLS_MCAST": true, + "ETHERTYPE_MUMPS": true, + "ETHERTYPE_NBPCC": true, + "ETHERTYPE_NBPCLAIM": true, + "ETHERTYPE_NBPCLREQ": true, + "ETHERTYPE_NBPCLRSP": true, + "ETHERTYPE_NBPCREQ": true, + "ETHERTYPE_NBPCRSP": true, + "ETHERTYPE_NBPDG": true, + "ETHERTYPE_NBPDGB": true, + "ETHERTYPE_NBPDLTE": true, + "ETHERTYPE_NBPRAR": true, + "ETHERTYPE_NBPRAS": true, + "ETHERTYPE_NBPRST": true, + "ETHERTYPE_NBPSCD": true, + "ETHERTYPE_NBPVCD": true, + "ETHERTYPE_NBS": true, + "ETHERTYPE_NCD": true, + "ETHERTYPE_NESTAR": true, + "ETHERTYPE_NETBEUI": true, + "ETHERTYPE_NOVELL": true, + "ETHERTYPE_NS": true, + "ETHERTYPE_NSAT": true, + "ETHERTYPE_NSCOMPAT": true, + "ETHERTYPE_NTRAILER": true, + "ETHERTYPE_OS9": true, + "ETHERTYPE_OS9NET": true, + "ETHERTYPE_PACER": true, + "ETHERTYPE_PAE": true, + "ETHERTYPE_PCS": true, + "ETHERTYPE_PLANNING": true, + "ETHERTYPE_PPP": true, + "ETHERTYPE_PPPOE": true, + "ETHERTYPE_PPPOEDISC": true, + "ETHERTYPE_PRIMENTS": true, + "ETHERTYPE_PUP": true, + "ETHERTYPE_PUPAT": true, + "ETHERTYPE_QINQ": true, + "ETHERTYPE_RACAL": true, + "ETHERTYPE_RATIONAL": true, + "ETHERTYPE_RAWFR": true, + "ETHERTYPE_RCL": true, + "ETHERTYPE_RDP": true, + "ETHERTYPE_RETIX": true, + "ETHERTYPE_REVARP": true, + "ETHERTYPE_SCA": true, + "ETHERTYPE_SECTRA": true, + "ETHERTYPE_SECUREDATA": true, + "ETHERTYPE_SGITW": true, + "ETHERTYPE_SG_BOUNCE": true, + "ETHERTYPE_SG_DIAG": true, + "ETHERTYPE_SG_NETGAMES": true, + "ETHERTYPE_SG_RESV": true, + "ETHERTYPE_SIMNET": true, + "ETHERTYPE_SLOW": true, + "ETHERTYPE_SLOWPROTOCOLS": true, + "ETHERTYPE_SNA": true, + "ETHERTYPE_SNMP": true, + "ETHERTYPE_SONIX": true, + "ETHERTYPE_SPIDER": true, + "ETHERTYPE_SPRITE": true, + "ETHERTYPE_STP": true, + "ETHERTYPE_TALARIS": true, + "ETHERTYPE_TALARISMC": true, + "ETHERTYPE_TCPCOMP": true, + "ETHERTYPE_TCPSM": true, + "ETHERTYPE_TEC": true, + "ETHERTYPE_TIGAN": true, + "ETHERTYPE_TRAIL": true, + "ETHERTYPE_TRANSETHER": true, + 
"ETHERTYPE_TYMSHARE": true, + "ETHERTYPE_UBBST": true, + "ETHERTYPE_UBDEBUG": true, + "ETHERTYPE_UBDIAGLOOP": true, + "ETHERTYPE_UBDL": true, + "ETHERTYPE_UBNIU": true, + "ETHERTYPE_UBNMC": true, + "ETHERTYPE_VALID": true, + "ETHERTYPE_VARIAN": true, + "ETHERTYPE_VAXELN": true, + "ETHERTYPE_VEECO": true, + "ETHERTYPE_VEXP": true, + "ETHERTYPE_VGLAB": true, + "ETHERTYPE_VINES": true, + "ETHERTYPE_VINESECHO": true, + "ETHERTYPE_VINESLOOP": true, + "ETHERTYPE_VITAL": true, + "ETHERTYPE_VLAN": true, + "ETHERTYPE_VLTLMAN": true, + "ETHERTYPE_VPROD": true, + "ETHERTYPE_VURESERVED": true, + "ETHERTYPE_WATERLOO": true, + "ETHERTYPE_WELLFLEET": true, + "ETHERTYPE_X25": true, + "ETHERTYPE_X75": true, + "ETHERTYPE_XNSSM": true, + "ETHERTYPE_XTP": true, + "ETHER_ADDR_LEN": true, + "ETHER_ALIGN": true, + "ETHER_CRC_LEN": true, + "ETHER_CRC_POLY_BE": true, + "ETHER_CRC_POLY_LE": true, + "ETHER_HDR_LEN": true, + "ETHER_MAX_DIX_LEN": true, + "ETHER_MAX_LEN": true, + "ETHER_MAX_LEN_JUMBO": true, + "ETHER_MIN_LEN": true, + "ETHER_PPPOE_ENCAP_LEN": true, + "ETHER_TYPE_LEN": true, + "ETHER_VLAN_ENCAP_LEN": true, + "ETH_P_1588": true, + "ETH_P_8021Q": true, + "ETH_P_802_2": true, + "ETH_P_802_3": true, + "ETH_P_AARP": true, + "ETH_P_ALL": true, + "ETH_P_AOE": true, + "ETH_P_ARCNET": true, + "ETH_P_ARP": true, + "ETH_P_ATALK": true, + "ETH_P_ATMFATE": true, + "ETH_P_ATMMPOA": true, + "ETH_P_AX25": true, + "ETH_P_BPQ": true, + "ETH_P_CAIF": true, + "ETH_P_CAN": true, + "ETH_P_CONTROL": true, + "ETH_P_CUST": true, + "ETH_P_DDCMP": true, + "ETH_P_DEC": true, + "ETH_P_DIAG": true, + "ETH_P_DNA_DL": true, + "ETH_P_DNA_RC": true, + "ETH_P_DNA_RT": true, + "ETH_P_DSA": true, + "ETH_P_ECONET": true, + "ETH_P_EDSA": true, + "ETH_P_FCOE": true, + "ETH_P_FIP": true, + "ETH_P_HDLC": true, + "ETH_P_IEEE802154": true, + "ETH_P_IEEEPUP": true, + "ETH_P_IEEEPUPAT": true, + "ETH_P_IP": true, + "ETH_P_IPV6": true, + "ETH_P_IPX": true, + "ETH_P_IRDA": true, + "ETH_P_LAT": true, + "ETH_P_LINK_CTL": true, + "ETH_P_LOCALTALK": true, + "ETH_P_LOOP": true, + "ETH_P_MOBITEX": true, + "ETH_P_MPLS_MC": true, + "ETH_P_MPLS_UC": true, + "ETH_P_PAE": true, + "ETH_P_PAUSE": true, + "ETH_P_PHONET": true, + "ETH_P_PPPTALK": true, + "ETH_P_PPP_DISC": true, + "ETH_P_PPP_MP": true, + "ETH_P_PPP_SES": true, + "ETH_P_PUP": true, + "ETH_P_PUPAT": true, + "ETH_P_RARP": true, + "ETH_P_SCA": true, + "ETH_P_SLOW": true, + "ETH_P_SNAP": true, + "ETH_P_TEB": true, + "ETH_P_TIPC": true, + "ETH_P_TRAILER": true, + "ETH_P_TR_802_2": true, + "ETH_P_WAN_PPP": true, + "ETH_P_WCCP": true, + "ETH_P_X25": true, + "ETIME": true, + "ETIMEDOUT": true, + "ETOOMANYREFS": true, + "ETXTBSY": true, + "EUCLEAN": true, + "EUNATCH": true, + "EUSERS": true, + "EVFILT_AIO": true, + "EVFILT_FS": true, + "EVFILT_LIO": true, + "EVFILT_MACHPORT": true, + "EVFILT_PROC": true, + "EVFILT_READ": true, + "EVFILT_SIGNAL": true, + "EVFILT_SYSCOUNT": true, + "EVFILT_THREADMARKER": true, + "EVFILT_TIMER": true, + "EVFILT_USER": true, + "EVFILT_VM": true, + "EVFILT_VNODE": true, + "EVFILT_WRITE": true, + "EV_ADD": true, + "EV_CLEAR": true, + "EV_DELETE": true, + "EV_DISABLE": true, + "EV_DISPATCH": true, + "EV_DROP": true, + "EV_ENABLE": true, + "EV_EOF": true, + "EV_ERROR": true, + "EV_FLAG0": true, + "EV_FLAG1": true, + "EV_ONESHOT": true, + "EV_OOBAND": true, + "EV_POLL": true, + "EV_RECEIPT": true, + "EV_SYSFLAGS": true, + "EWINDOWS": true, + "EWOULDBLOCK": true, + "EXDEV": true, + "EXFULL": true, + "EXTA": true, + "EXTB": true, + "EXTPROC": true, + "Environ": true, + "EpollCreate": 
true, + "EpollCreate1": true, + "EpollCtl": true, + "EpollEvent": true, + "EpollWait": true, + "Errno": true, + "EscapeArg": true, + "Exchangedata": true, + "Exec": true, + "Exit": true, + "ExitProcess": true, + "FD_CLOEXEC": true, + "FD_SETSIZE": true, + "FILE_ACTION_ADDED": true, + "FILE_ACTION_MODIFIED": true, + "FILE_ACTION_REMOVED": true, + "FILE_ACTION_RENAMED_NEW_NAME": true, + "FILE_ACTION_RENAMED_OLD_NAME": true, + "FILE_APPEND_DATA": true, + "FILE_ATTRIBUTE_ARCHIVE": true, + "FILE_ATTRIBUTE_DIRECTORY": true, + "FILE_ATTRIBUTE_HIDDEN": true, + "FILE_ATTRIBUTE_NORMAL": true, + "FILE_ATTRIBUTE_READONLY": true, + "FILE_ATTRIBUTE_REPARSE_POINT": true, + "FILE_ATTRIBUTE_SYSTEM": true, + "FILE_BEGIN": true, + "FILE_CURRENT": true, + "FILE_END": true, + "FILE_FLAG_BACKUP_SEMANTICS": true, + "FILE_FLAG_OPEN_REPARSE_POINT": true, + "FILE_FLAG_OVERLAPPED": true, + "FILE_LIST_DIRECTORY": true, + "FILE_MAP_COPY": true, + "FILE_MAP_EXECUTE": true, + "FILE_MAP_READ": true, + "FILE_MAP_WRITE": true, + "FILE_NOTIFY_CHANGE_ATTRIBUTES": true, + "FILE_NOTIFY_CHANGE_CREATION": true, + "FILE_NOTIFY_CHANGE_DIR_NAME": true, + "FILE_NOTIFY_CHANGE_FILE_NAME": true, + "FILE_NOTIFY_CHANGE_LAST_ACCESS": true, + "FILE_NOTIFY_CHANGE_LAST_WRITE": true, + "FILE_NOTIFY_CHANGE_SIZE": true, + "FILE_SHARE_DELETE": true, + "FILE_SHARE_READ": true, + "FILE_SHARE_WRITE": true, + "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS": true, + "FILE_SKIP_SET_EVENT_ON_HANDLE": true, + "FILE_TYPE_CHAR": true, + "FILE_TYPE_DISK": true, + "FILE_TYPE_PIPE": true, + "FILE_TYPE_REMOTE": true, + "FILE_TYPE_UNKNOWN": true, + "FILE_WRITE_ATTRIBUTES": true, + "FLUSHO": true, + "FORMAT_MESSAGE_ALLOCATE_BUFFER": true, + "FORMAT_MESSAGE_ARGUMENT_ARRAY": true, + "FORMAT_MESSAGE_FROM_HMODULE": true, + "FORMAT_MESSAGE_FROM_STRING": true, + "FORMAT_MESSAGE_FROM_SYSTEM": true, + "FORMAT_MESSAGE_IGNORE_INSERTS": true, + "FORMAT_MESSAGE_MAX_WIDTH_MASK": true, + "FSCTL_GET_REPARSE_POINT": true, + "F_ADDFILESIGS": true, + "F_ADDSIGS": true, + "F_ALLOCATEALL": true, + "F_ALLOCATECONTIG": true, + "F_CANCEL": true, + "F_CHKCLEAN": true, + "F_CLOSEM": true, + "F_DUP2FD": true, + "F_DUP2FD_CLOEXEC": true, + "F_DUPFD": true, + "F_DUPFD_CLOEXEC": true, + "F_EXLCK": true, + "F_FLUSH_DATA": true, + "F_FREEZE_FS": true, + "F_FSCTL": true, + "F_FSDIRMASK": true, + "F_FSIN": true, + "F_FSINOUT": true, + "F_FSOUT": true, + "F_FSPRIV": true, + "F_FSVOID": true, + "F_FULLFSYNC": true, + "F_GETFD": true, + "F_GETFL": true, + "F_GETLEASE": true, + "F_GETLK": true, + "F_GETLK64": true, + "F_GETLKPID": true, + "F_GETNOSIGPIPE": true, + "F_GETOWN": true, + "F_GETOWN_EX": true, + "F_GETPATH": true, + "F_GETPATH_MTMINFO": true, + "F_GETPIPE_SZ": true, + "F_GETPROTECTIONCLASS": true, + "F_GETSIG": true, + "F_GLOBAL_NOCACHE": true, + "F_LOCK": true, + "F_LOG2PHYS": true, + "F_LOG2PHYS_EXT": true, + "F_MARKDEPENDENCY": true, + "F_MAXFD": true, + "F_NOCACHE": true, + "F_NODIRECT": true, + "F_NOTIFY": true, + "F_OGETLK": true, + "F_OK": true, + "F_OSETLK": true, + "F_OSETLKW": true, + "F_PARAM_MASK": true, + "F_PARAM_MAX": true, + "F_PATHPKG_CHECK": true, + "F_PEOFPOSMODE": true, + "F_PREALLOCATE": true, + "F_RDADVISE": true, + "F_RDAHEAD": true, + "F_RDLCK": true, + "F_READAHEAD": true, + "F_READBOOTSTRAP": true, + "F_SETBACKINGSTORE": true, + "F_SETFD": true, + "F_SETFL": true, + "F_SETLEASE": true, + "F_SETLK": true, + "F_SETLK64": true, + "F_SETLKW": true, + "F_SETLKW64": true, + "F_SETLK_REMOTE": true, + "F_SETNOSIGPIPE": true, + "F_SETOWN": true, + "F_SETOWN_EX": true, + 
"F_SETPIPE_SZ": true, + "F_SETPROTECTIONCLASS": true, + "F_SETSIG": true, + "F_SETSIZE": true, + "F_SHLCK": true, + "F_TEST": true, + "F_THAW_FS": true, + "F_TLOCK": true, + "F_ULOCK": true, + "F_UNLCK": true, + "F_UNLCKSYS": true, + "F_VOLPOSMODE": true, + "F_WRITEBOOTSTRAP": true, + "F_WRLCK": true, + "Faccessat": true, + "Fallocate": true, + "Fbootstraptransfer_t": true, + "Fchdir": true, + "Fchflags": true, + "Fchmod": true, + "Fchmodat": true, + "Fchown": true, + "Fchownat": true, + "FcntlFlock": true, + "FdSet": true, + "Fdatasync": true, + "FileNotifyInformation": true, + "Filetime": true, + "FindClose": true, + "FindFirstFile": true, + "FindNextFile": true, + "Flock": true, + "Flock_t": true, + "FlushBpf": true, + "FlushFileBuffers": true, + "FlushViewOfFile": true, + "ForkExec": true, + "ForkLock": true, + "FormatMessage": true, + "Fpathconf": true, + "FreeAddrInfoW": true, + "FreeEnvironmentStrings": true, + "FreeLibrary": true, + "Fsid": true, + "Fstat": true, + "Fstatat": true, + "Fstatfs": true, + "Fstore_t": true, + "Fsync": true, + "Ftruncate": true, + "FullPath": true, + "Futimes": true, + "Futimesat": true, + "GENERIC_ALL": true, + "GENERIC_EXECUTE": true, + "GENERIC_READ": true, + "GENERIC_WRITE": true, + "GUID": true, + "GetAcceptExSockaddrs": true, + "GetAdaptersInfo": true, + "GetAddrInfoW": true, + "GetCommandLine": true, + "GetComputerName": true, + "GetConsoleMode": true, + "GetCurrentDirectory": true, + "GetCurrentProcess": true, + "GetEnvironmentStrings": true, + "GetEnvironmentVariable": true, + "GetExitCodeProcess": true, + "GetFileAttributes": true, + "GetFileAttributesEx": true, + "GetFileExInfoStandard": true, + "GetFileExMaxInfoLevel": true, + "GetFileInformationByHandle": true, + "GetFileType": true, + "GetFullPathName": true, + "GetHostByName": true, + "GetIfEntry": true, + "GetLastError": true, + "GetLengthSid": true, + "GetLongPathName": true, + "GetProcAddress": true, + "GetProcessTimes": true, + "GetProtoByName": true, + "GetQueuedCompletionStatus": true, + "GetServByName": true, + "GetShortPathName": true, + "GetStartupInfo": true, + "GetStdHandle": true, + "GetSystemTimeAsFileTime": true, + "GetTempPath": true, + "GetTimeZoneInformation": true, + "GetTokenInformation": true, + "GetUserNameEx": true, + "GetUserProfileDirectory": true, + "GetVersion": true, + "Getcwd": true, + "Getdents": true, + "Getdirentries": true, + "Getdtablesize": true, + "Getegid": true, + "Getenv": true, + "Geteuid": true, + "Getfsstat": true, + "Getgid": true, + "Getgroups": true, + "Getpagesize": true, + "Getpeername": true, + "Getpgid": true, + "Getpgrp": true, + "Getpid": true, + "Getppid": true, + "Getpriority": true, + "Getrlimit": true, + "Getrusage": true, + "Getsid": true, + "Getsockname": true, + "Getsockopt": true, + "GetsockoptByte": true, + "GetsockoptICMPv6Filter": true, + "GetsockoptIPMreq": true, + "GetsockoptIPMreqn": true, + "GetsockoptIPv6MTUInfo": true, + "GetsockoptIPv6Mreq": true, + "GetsockoptInet4Addr": true, + "GetsockoptInt": true, + "GetsockoptUcred": true, + "Gettid": true, + "Gettimeofday": true, + "Getuid": true, + "Getwd": true, + "Getxattr": true, + "HANDLE_FLAG_INHERIT": true, + "HKEY_CLASSES_ROOT": true, + "HKEY_CURRENT_CONFIG": true, + "HKEY_CURRENT_USER": true, + "HKEY_DYN_DATA": true, + "HKEY_LOCAL_MACHINE": true, + "HKEY_PERFORMANCE_DATA": true, + "HKEY_USERS": true, + "HUPCL": true, + "Handle": true, + "Hostent": true, + "ICANON": true, + "ICMP6_FILTER": true, + "ICMPV6_FILTER": true, + "ICMPv6Filter": true, + "ICRNL": true, + "IEXTEN": 
true, + "IFAN_ARRIVAL": true, + "IFAN_DEPARTURE": true, + "IFA_ADDRESS": true, + "IFA_ANYCAST": true, + "IFA_BROADCAST": true, + "IFA_CACHEINFO": true, + "IFA_F_DADFAILED": true, + "IFA_F_DEPRECATED": true, + "IFA_F_HOMEADDRESS": true, + "IFA_F_NODAD": true, + "IFA_F_OPTIMISTIC": true, + "IFA_F_PERMANENT": true, + "IFA_F_SECONDARY": true, + "IFA_F_TEMPORARY": true, + "IFA_F_TENTATIVE": true, + "IFA_LABEL": true, + "IFA_LOCAL": true, + "IFA_MAX": true, + "IFA_MULTICAST": true, + "IFA_ROUTE": true, + "IFA_UNSPEC": true, + "IFF_ALLMULTI": true, + "IFF_ALTPHYS": true, + "IFF_AUTOMEDIA": true, + "IFF_BROADCAST": true, + "IFF_CANTCHANGE": true, + "IFF_CANTCONFIG": true, + "IFF_DEBUG": true, + "IFF_DRV_OACTIVE": true, + "IFF_DRV_RUNNING": true, + "IFF_DYING": true, + "IFF_DYNAMIC": true, + "IFF_LINK0": true, + "IFF_LINK1": true, + "IFF_LINK2": true, + "IFF_LOOPBACK": true, + "IFF_MASTER": true, + "IFF_MONITOR": true, + "IFF_MULTICAST": true, + "IFF_NOARP": true, + "IFF_NOTRAILERS": true, + "IFF_NO_PI": true, + "IFF_OACTIVE": true, + "IFF_ONE_QUEUE": true, + "IFF_POINTOPOINT": true, + "IFF_POINTTOPOINT": true, + "IFF_PORTSEL": true, + "IFF_PPROMISC": true, + "IFF_PROMISC": true, + "IFF_RENAMING": true, + "IFF_RUNNING": true, + "IFF_SIMPLEX": true, + "IFF_SLAVE": true, + "IFF_SMART": true, + "IFF_STATICARP": true, + "IFF_TAP": true, + "IFF_TUN": true, + "IFF_TUN_EXCL": true, + "IFF_UP": true, + "IFF_VNET_HDR": true, + "IFLA_ADDRESS": true, + "IFLA_BROADCAST": true, + "IFLA_COST": true, + "IFLA_IFALIAS": true, + "IFLA_IFNAME": true, + "IFLA_LINK": true, + "IFLA_LINKINFO": true, + "IFLA_LINKMODE": true, + "IFLA_MAP": true, + "IFLA_MASTER": true, + "IFLA_MAX": true, + "IFLA_MTU": true, + "IFLA_NET_NS_PID": true, + "IFLA_OPERSTATE": true, + "IFLA_PRIORITY": true, + "IFLA_PROTINFO": true, + "IFLA_QDISC": true, + "IFLA_STATS": true, + "IFLA_TXQLEN": true, + "IFLA_UNSPEC": true, + "IFLA_WEIGHT": true, + "IFLA_WIRELESS": true, + "IFNAMSIZ": true, + "IFT_1822": true, + "IFT_A12MPPSWITCH": true, + "IFT_AAL2": true, + "IFT_AAL5": true, + "IFT_ADSL": true, + "IFT_AFLANE8023": true, + "IFT_AFLANE8025": true, + "IFT_ARAP": true, + "IFT_ARCNET": true, + "IFT_ARCNETPLUS": true, + "IFT_ASYNC": true, + "IFT_ATM": true, + "IFT_ATMDXI": true, + "IFT_ATMFUNI": true, + "IFT_ATMIMA": true, + "IFT_ATMLOGICAL": true, + "IFT_ATMRADIO": true, + "IFT_ATMSUBINTERFACE": true, + "IFT_ATMVCIENDPT": true, + "IFT_ATMVIRTUAL": true, + "IFT_BGPPOLICYACCOUNTING": true, + "IFT_BLUETOOTH": true, + "IFT_BRIDGE": true, + "IFT_BSC": true, + "IFT_CARP": true, + "IFT_CCTEMUL": true, + "IFT_CELLULAR": true, + "IFT_CEPT": true, + "IFT_CES": true, + "IFT_CHANNEL": true, + "IFT_CNR": true, + "IFT_COFFEE": true, + "IFT_COMPOSITELINK": true, + "IFT_DCN": true, + "IFT_DIGITALPOWERLINE": true, + "IFT_DIGITALWRAPPEROVERHEADCHANNEL": true, + "IFT_DLSW": true, + "IFT_DOCSCABLEDOWNSTREAM": true, + "IFT_DOCSCABLEMACLAYER": true, + "IFT_DOCSCABLEUPSTREAM": true, + "IFT_DOCSCABLEUPSTREAMCHANNEL": true, + "IFT_DS0": true, + "IFT_DS0BUNDLE": true, + "IFT_DS1FDL": true, + "IFT_DS3": true, + "IFT_DTM": true, + "IFT_DUMMY": true, + "IFT_DVBASILN": true, + "IFT_DVBASIOUT": true, + "IFT_DVBRCCDOWNSTREAM": true, + "IFT_DVBRCCMACLAYER": true, + "IFT_DVBRCCUPSTREAM": true, + "IFT_ECONET": true, + "IFT_ENC": true, + "IFT_EON": true, + "IFT_EPLRS": true, + "IFT_ESCON": true, + "IFT_ETHER": true, + "IFT_FAITH": true, + "IFT_FAST": true, + "IFT_FASTETHER": true, + "IFT_FASTETHERFX": true, + "IFT_FDDI": true, + "IFT_FIBRECHANNEL": true, + "IFT_FRAMERELAYINTERCONNECT": 
true, + "IFT_FRAMERELAYMPI": true, + "IFT_FRDLCIENDPT": true, + "IFT_FRELAY": true, + "IFT_FRELAYDCE": true, + "IFT_FRF16MFRBUNDLE": true, + "IFT_FRFORWARD": true, + "IFT_G703AT2MB": true, + "IFT_G703AT64K": true, + "IFT_GIF": true, + "IFT_GIGABITETHERNET": true, + "IFT_GR303IDT": true, + "IFT_GR303RDT": true, + "IFT_H323GATEKEEPER": true, + "IFT_H323PROXY": true, + "IFT_HDH1822": true, + "IFT_HDLC": true, + "IFT_HDSL2": true, + "IFT_HIPERLAN2": true, + "IFT_HIPPI": true, + "IFT_HIPPIINTERFACE": true, + "IFT_HOSTPAD": true, + "IFT_HSSI": true, + "IFT_HY": true, + "IFT_IBM370PARCHAN": true, + "IFT_IDSL": true, + "IFT_IEEE1394": true, + "IFT_IEEE80211": true, + "IFT_IEEE80212": true, + "IFT_IEEE8023ADLAG": true, + "IFT_IFGSN": true, + "IFT_IMT": true, + "IFT_INFINIBAND": true, + "IFT_INTERLEAVE": true, + "IFT_IP": true, + "IFT_IPFORWARD": true, + "IFT_IPOVERATM": true, + "IFT_IPOVERCDLC": true, + "IFT_IPOVERCLAW": true, + "IFT_IPSWITCH": true, + "IFT_IPXIP": true, + "IFT_ISDN": true, + "IFT_ISDNBASIC": true, + "IFT_ISDNPRIMARY": true, + "IFT_ISDNS": true, + "IFT_ISDNU": true, + "IFT_ISO88022LLC": true, + "IFT_ISO88023": true, + "IFT_ISO88024": true, + "IFT_ISO88025": true, + "IFT_ISO88025CRFPINT": true, + "IFT_ISO88025DTR": true, + "IFT_ISO88025FIBER": true, + "IFT_ISO88026": true, + "IFT_ISUP": true, + "IFT_L2VLAN": true, + "IFT_L3IPVLAN": true, + "IFT_L3IPXVLAN": true, + "IFT_LAPB": true, + "IFT_LAPD": true, + "IFT_LAPF": true, + "IFT_LINEGROUP": true, + "IFT_LOCALTALK": true, + "IFT_LOOP": true, + "IFT_MEDIAMAILOVERIP": true, + "IFT_MFSIGLINK": true, + "IFT_MIOX25": true, + "IFT_MODEM": true, + "IFT_MPC": true, + "IFT_MPLS": true, + "IFT_MPLSTUNNEL": true, + "IFT_MSDSL": true, + "IFT_MVL": true, + "IFT_MYRINET": true, + "IFT_NFAS": true, + "IFT_NSIP": true, + "IFT_OPTICALCHANNEL": true, + "IFT_OPTICALTRANSPORT": true, + "IFT_OTHER": true, + "IFT_P10": true, + "IFT_P80": true, + "IFT_PARA": true, + "IFT_PDP": true, + "IFT_PFLOG": true, + "IFT_PFLOW": true, + "IFT_PFSYNC": true, + "IFT_PLC": true, + "IFT_PON155": true, + "IFT_PON622": true, + "IFT_POS": true, + "IFT_PPP": true, + "IFT_PPPMULTILINKBUNDLE": true, + "IFT_PROPATM": true, + "IFT_PROPBWAP2MP": true, + "IFT_PROPCNLS": true, + "IFT_PROPDOCSWIRELESSDOWNSTREAM": true, + "IFT_PROPDOCSWIRELESSMACLAYER": true, + "IFT_PROPDOCSWIRELESSUPSTREAM": true, + "IFT_PROPMUX": true, + "IFT_PROPVIRTUAL": true, + "IFT_PROPWIRELESSP2P": true, + "IFT_PTPSERIAL": true, + "IFT_PVC": true, + "IFT_Q2931": true, + "IFT_QLLC": true, + "IFT_RADIOMAC": true, + "IFT_RADSL": true, + "IFT_REACHDSL": true, + "IFT_RFC1483": true, + "IFT_RS232": true, + "IFT_RSRB": true, + "IFT_SDLC": true, + "IFT_SDSL": true, + "IFT_SHDSL": true, + "IFT_SIP": true, + "IFT_SIPSIG": true, + "IFT_SIPTG": true, + "IFT_SLIP": true, + "IFT_SMDSDXI": true, + "IFT_SMDSICIP": true, + "IFT_SONET": true, + "IFT_SONETOVERHEADCHANNEL": true, + "IFT_SONETPATH": true, + "IFT_SONETVT": true, + "IFT_SRP": true, + "IFT_SS7SIGLINK": true, + "IFT_STACKTOSTACK": true, + "IFT_STARLAN": true, + "IFT_STF": true, + "IFT_T1": true, + "IFT_TDLC": true, + "IFT_TELINK": true, + "IFT_TERMPAD": true, + "IFT_TR008": true, + "IFT_TRANSPHDLC": true, + "IFT_TUNNEL": true, + "IFT_ULTRA": true, + "IFT_USB": true, + "IFT_V11": true, + "IFT_V35": true, + "IFT_V36": true, + "IFT_V37": true, + "IFT_VDSL": true, + "IFT_VIRTUALIPADDRESS": true, + "IFT_VIRTUALTG": true, + "IFT_VOICEDID": true, + "IFT_VOICEEM": true, + "IFT_VOICEEMFGD": true, + "IFT_VOICEENCAP": true, + "IFT_VOICEFGDEANA": true, + "IFT_VOICEFXO": true, + 
"IFT_VOICEFXS": true, + "IFT_VOICEOVERATM": true, + "IFT_VOICEOVERCABLE": true, + "IFT_VOICEOVERFRAMERELAY": true, + "IFT_VOICEOVERIP": true, + "IFT_X213": true, + "IFT_X25": true, + "IFT_X25DDN": true, + "IFT_X25HUNTGROUP": true, + "IFT_X25MLP": true, + "IFT_X25PLE": true, + "IFT_XETHER": true, + "IGNBRK": true, + "IGNCR": true, + "IGNORE": true, + "IGNPAR": true, + "IMAXBEL": true, + "INFINITE": true, + "INLCR": true, + "INPCK": true, + "INVALID_FILE_ATTRIBUTES": true, + "IN_ACCESS": true, + "IN_ALL_EVENTS": true, + "IN_ATTRIB": true, + "IN_CLASSA_HOST": true, + "IN_CLASSA_MAX": true, + "IN_CLASSA_NET": true, + "IN_CLASSA_NSHIFT": true, + "IN_CLASSB_HOST": true, + "IN_CLASSB_MAX": true, + "IN_CLASSB_NET": true, + "IN_CLASSB_NSHIFT": true, + "IN_CLASSC_HOST": true, + "IN_CLASSC_NET": true, + "IN_CLASSC_NSHIFT": true, + "IN_CLASSD_HOST": true, + "IN_CLASSD_NET": true, + "IN_CLASSD_NSHIFT": true, + "IN_CLOEXEC": true, + "IN_CLOSE": true, + "IN_CLOSE_NOWRITE": true, + "IN_CLOSE_WRITE": true, + "IN_CREATE": true, + "IN_DELETE": true, + "IN_DELETE_SELF": true, + "IN_DONT_FOLLOW": true, + "IN_EXCL_UNLINK": true, + "IN_IGNORED": true, + "IN_ISDIR": true, + "IN_LINKLOCALNETNUM": true, + "IN_LOOPBACKNET": true, + "IN_MASK_ADD": true, + "IN_MODIFY": true, + "IN_MOVE": true, + "IN_MOVED_FROM": true, + "IN_MOVED_TO": true, + "IN_MOVE_SELF": true, + "IN_NONBLOCK": true, + "IN_ONESHOT": true, + "IN_ONLYDIR": true, + "IN_OPEN": true, + "IN_Q_OVERFLOW": true, + "IN_RFC3021_HOST": true, + "IN_RFC3021_MASK": true, + "IN_RFC3021_NET": true, + "IN_RFC3021_NSHIFT": true, + "IN_UNMOUNT": true, + "IOC_IN": true, + "IOC_INOUT": true, + "IOC_OUT": true, + "IOC_VENDOR": true, + "IOC_WS2": true, + "IO_REPARSE_TAG_SYMLINK": true, + "IPMreq": true, + "IPMreqn": true, + "IPPROTO_3PC": true, + "IPPROTO_ADFS": true, + "IPPROTO_AH": true, + "IPPROTO_AHIP": true, + "IPPROTO_APES": true, + "IPPROTO_ARGUS": true, + "IPPROTO_AX25": true, + "IPPROTO_BHA": true, + "IPPROTO_BLT": true, + "IPPROTO_BRSATMON": true, + "IPPROTO_CARP": true, + "IPPROTO_CFTP": true, + "IPPROTO_CHAOS": true, + "IPPROTO_CMTP": true, + "IPPROTO_COMP": true, + "IPPROTO_CPHB": true, + "IPPROTO_CPNX": true, + "IPPROTO_DCCP": true, + "IPPROTO_DDP": true, + "IPPROTO_DGP": true, + "IPPROTO_DIVERT": true, + "IPPROTO_DIVERT_INIT": true, + "IPPROTO_DIVERT_RESP": true, + "IPPROTO_DONE": true, + "IPPROTO_DSTOPTS": true, + "IPPROTO_EGP": true, + "IPPROTO_EMCON": true, + "IPPROTO_ENCAP": true, + "IPPROTO_EON": true, + "IPPROTO_ESP": true, + "IPPROTO_ETHERIP": true, + "IPPROTO_FRAGMENT": true, + "IPPROTO_GGP": true, + "IPPROTO_GMTP": true, + "IPPROTO_GRE": true, + "IPPROTO_HELLO": true, + "IPPROTO_HMP": true, + "IPPROTO_HOPOPTS": true, + "IPPROTO_ICMP": true, + "IPPROTO_ICMPV6": true, + "IPPROTO_IDP": true, + "IPPROTO_IDPR": true, + "IPPROTO_IDRP": true, + "IPPROTO_IGMP": true, + "IPPROTO_IGP": true, + "IPPROTO_IGRP": true, + "IPPROTO_IL": true, + "IPPROTO_INLSP": true, + "IPPROTO_INP": true, + "IPPROTO_IP": true, + "IPPROTO_IPCOMP": true, + "IPPROTO_IPCV": true, + "IPPROTO_IPEIP": true, + "IPPROTO_IPIP": true, + "IPPROTO_IPPC": true, + "IPPROTO_IPV4": true, + "IPPROTO_IPV6": true, + "IPPROTO_IPV6_ICMP": true, + "IPPROTO_IRTP": true, + "IPPROTO_KRYPTOLAN": true, + "IPPROTO_LARP": true, + "IPPROTO_LEAF1": true, + "IPPROTO_LEAF2": true, + "IPPROTO_MAX": true, + "IPPROTO_MAXID": true, + "IPPROTO_MEAS": true, + "IPPROTO_MH": true, + "IPPROTO_MHRP": true, + "IPPROTO_MICP": true, + "IPPROTO_MOBILE": true, + "IPPROTO_MPLS": true, + "IPPROTO_MTP": true, + "IPPROTO_MUX": 
true, + "IPPROTO_ND": true, + "IPPROTO_NHRP": true, + "IPPROTO_NONE": true, + "IPPROTO_NSP": true, + "IPPROTO_NVPII": true, + "IPPROTO_OLD_DIVERT": true, + "IPPROTO_OSPFIGP": true, + "IPPROTO_PFSYNC": true, + "IPPROTO_PGM": true, + "IPPROTO_PIGP": true, + "IPPROTO_PIM": true, + "IPPROTO_PRM": true, + "IPPROTO_PUP": true, + "IPPROTO_PVP": true, + "IPPROTO_RAW": true, + "IPPROTO_RCCMON": true, + "IPPROTO_RDP": true, + "IPPROTO_ROUTING": true, + "IPPROTO_RSVP": true, + "IPPROTO_RVD": true, + "IPPROTO_SATEXPAK": true, + "IPPROTO_SATMON": true, + "IPPROTO_SCCSP": true, + "IPPROTO_SCTP": true, + "IPPROTO_SDRP": true, + "IPPROTO_SEND": true, + "IPPROTO_SEP": true, + "IPPROTO_SKIP": true, + "IPPROTO_SPACER": true, + "IPPROTO_SRPC": true, + "IPPROTO_ST": true, + "IPPROTO_SVMTP": true, + "IPPROTO_SWIPE": true, + "IPPROTO_TCF": true, + "IPPROTO_TCP": true, + "IPPROTO_TLSP": true, + "IPPROTO_TP": true, + "IPPROTO_TPXX": true, + "IPPROTO_TRUNK1": true, + "IPPROTO_TRUNK2": true, + "IPPROTO_TTP": true, + "IPPROTO_UDP": true, + "IPPROTO_UDPLITE": true, + "IPPROTO_VINES": true, + "IPPROTO_VISA": true, + "IPPROTO_VMTP": true, + "IPPROTO_VRRP": true, + "IPPROTO_WBEXPAK": true, + "IPPROTO_WBMON": true, + "IPPROTO_WSN": true, + "IPPROTO_XNET": true, + "IPPROTO_XTP": true, + "IPV6_2292DSTOPTS": true, + "IPV6_2292HOPLIMIT": true, + "IPV6_2292HOPOPTS": true, + "IPV6_2292NEXTHOP": true, + "IPV6_2292PKTINFO": true, + "IPV6_2292PKTOPTIONS": true, + "IPV6_2292RTHDR": true, + "IPV6_ADDRFORM": true, + "IPV6_ADD_MEMBERSHIP": true, + "IPV6_AUTHHDR": true, + "IPV6_AUTH_LEVEL": true, + "IPV6_AUTOFLOWLABEL": true, + "IPV6_BINDANY": true, + "IPV6_BINDV6ONLY": true, + "IPV6_BOUND_IF": true, + "IPV6_CHECKSUM": true, + "IPV6_DEFAULT_MULTICAST_HOPS": true, + "IPV6_DEFAULT_MULTICAST_LOOP": true, + "IPV6_DEFHLIM": true, + "IPV6_DONTFRAG": true, + "IPV6_DROP_MEMBERSHIP": true, + "IPV6_DSTOPTS": true, + "IPV6_ESP_NETWORK_LEVEL": true, + "IPV6_ESP_TRANS_LEVEL": true, + "IPV6_FAITH": true, + "IPV6_FLOWINFO_MASK": true, + "IPV6_FLOWLABEL_MASK": true, + "IPV6_FRAGTTL": true, + "IPV6_FW_ADD": true, + "IPV6_FW_DEL": true, + "IPV6_FW_FLUSH": true, + "IPV6_FW_GET": true, + "IPV6_FW_ZERO": true, + "IPV6_HLIMDEC": true, + "IPV6_HOPLIMIT": true, + "IPV6_HOPOPTS": true, + "IPV6_IPCOMP_LEVEL": true, + "IPV6_IPSEC_POLICY": true, + "IPV6_JOIN_ANYCAST": true, + "IPV6_JOIN_GROUP": true, + "IPV6_LEAVE_ANYCAST": true, + "IPV6_LEAVE_GROUP": true, + "IPV6_MAXHLIM": true, + "IPV6_MAXOPTHDR": true, + "IPV6_MAXPACKET": true, + "IPV6_MAX_GROUP_SRC_FILTER": true, + "IPV6_MAX_MEMBERSHIPS": true, + "IPV6_MAX_SOCK_SRC_FILTER": true, + "IPV6_MIN_MEMBERSHIPS": true, + "IPV6_MMTU": true, + "IPV6_MSFILTER": true, + "IPV6_MTU": true, + "IPV6_MTU_DISCOVER": true, + "IPV6_MULTICAST_HOPS": true, + "IPV6_MULTICAST_IF": true, + "IPV6_MULTICAST_LOOP": true, + "IPV6_NEXTHOP": true, + "IPV6_OPTIONS": true, + "IPV6_PATHMTU": true, + "IPV6_PIPEX": true, + "IPV6_PKTINFO": true, + "IPV6_PMTUDISC_DO": true, + "IPV6_PMTUDISC_DONT": true, + "IPV6_PMTUDISC_PROBE": true, + "IPV6_PMTUDISC_WANT": true, + "IPV6_PORTRANGE": true, + "IPV6_PORTRANGE_DEFAULT": true, + "IPV6_PORTRANGE_HIGH": true, + "IPV6_PORTRANGE_LOW": true, + "IPV6_PREFER_TEMPADDR": true, + "IPV6_RECVDSTOPTS": true, + "IPV6_RECVDSTPORT": true, + "IPV6_RECVERR": true, + "IPV6_RECVHOPLIMIT": true, + "IPV6_RECVHOPOPTS": true, + "IPV6_RECVPATHMTU": true, + "IPV6_RECVPKTINFO": true, + "IPV6_RECVRTHDR": true, + "IPV6_RECVTCLASS": true, + "IPV6_ROUTER_ALERT": true, + "IPV6_RTABLE": true, + "IPV6_RTHDR": true, + 
"IPV6_RTHDRDSTOPTS": true, + "IPV6_RTHDR_LOOSE": true, + "IPV6_RTHDR_STRICT": true, + "IPV6_RTHDR_TYPE_0": true, + "IPV6_RXDSTOPTS": true, + "IPV6_RXHOPOPTS": true, + "IPV6_SOCKOPT_RESERVED1": true, + "IPV6_TCLASS": true, + "IPV6_UNICAST_HOPS": true, + "IPV6_USE_MIN_MTU": true, + "IPV6_V6ONLY": true, + "IPV6_VERSION": true, + "IPV6_VERSION_MASK": true, + "IPV6_XFRM_POLICY": true, + "IP_ADD_MEMBERSHIP": true, + "IP_ADD_SOURCE_MEMBERSHIP": true, + "IP_AUTH_LEVEL": true, + "IP_BINDANY": true, + "IP_BLOCK_SOURCE": true, + "IP_BOUND_IF": true, + "IP_DEFAULT_MULTICAST_LOOP": true, + "IP_DEFAULT_MULTICAST_TTL": true, + "IP_DF": true, + "IP_DIVERTFL": true, + "IP_DONTFRAG": true, + "IP_DROP_MEMBERSHIP": true, + "IP_DROP_SOURCE_MEMBERSHIP": true, + "IP_DUMMYNET3": true, + "IP_DUMMYNET_CONFIGURE": true, + "IP_DUMMYNET_DEL": true, + "IP_DUMMYNET_FLUSH": true, + "IP_DUMMYNET_GET": true, + "IP_EF": true, + "IP_ERRORMTU": true, + "IP_ESP_NETWORK_LEVEL": true, + "IP_ESP_TRANS_LEVEL": true, + "IP_FAITH": true, + "IP_FREEBIND": true, + "IP_FW3": true, + "IP_FW_ADD": true, + "IP_FW_DEL": true, + "IP_FW_FLUSH": true, + "IP_FW_GET": true, + "IP_FW_NAT_CFG": true, + "IP_FW_NAT_DEL": true, + "IP_FW_NAT_GET_CONFIG": true, + "IP_FW_NAT_GET_LOG": true, + "IP_FW_RESETLOG": true, + "IP_FW_TABLE_ADD": true, + "IP_FW_TABLE_DEL": true, + "IP_FW_TABLE_FLUSH": true, + "IP_FW_TABLE_GETSIZE": true, + "IP_FW_TABLE_LIST": true, + "IP_FW_ZERO": true, + "IP_HDRINCL": true, + "IP_IPCOMP_LEVEL": true, + "IP_IPSECFLOWINFO": true, + "IP_IPSEC_LOCAL_AUTH": true, + "IP_IPSEC_LOCAL_CRED": true, + "IP_IPSEC_LOCAL_ID": true, + "IP_IPSEC_POLICY": true, + "IP_IPSEC_REMOTE_AUTH": true, + "IP_IPSEC_REMOTE_CRED": true, + "IP_IPSEC_REMOTE_ID": true, + "IP_MAXPACKET": true, + "IP_MAX_GROUP_SRC_FILTER": true, + "IP_MAX_MEMBERSHIPS": true, + "IP_MAX_SOCK_MUTE_FILTER": true, + "IP_MAX_SOCK_SRC_FILTER": true, + "IP_MAX_SOURCE_FILTER": true, + "IP_MF": true, + "IP_MINFRAGSIZE": true, + "IP_MINTTL": true, + "IP_MIN_MEMBERSHIPS": true, + "IP_MSFILTER": true, + "IP_MSS": true, + "IP_MTU": true, + "IP_MTU_DISCOVER": true, + "IP_MULTICAST_IF": true, + "IP_MULTICAST_IFINDEX": true, + "IP_MULTICAST_LOOP": true, + "IP_MULTICAST_TTL": true, + "IP_MULTICAST_VIF": true, + "IP_NAT__XXX": true, + "IP_OFFMASK": true, + "IP_OLD_FW_ADD": true, + "IP_OLD_FW_DEL": true, + "IP_OLD_FW_FLUSH": true, + "IP_OLD_FW_GET": true, + "IP_OLD_FW_RESETLOG": true, + "IP_OLD_FW_ZERO": true, + "IP_ONESBCAST": true, + "IP_OPTIONS": true, + "IP_ORIGDSTADDR": true, + "IP_PASSSEC": true, + "IP_PIPEX": true, + "IP_PKTINFO": true, + "IP_PKTOPTIONS": true, + "IP_PMTUDISC": true, + "IP_PMTUDISC_DO": true, + "IP_PMTUDISC_DONT": true, + "IP_PMTUDISC_PROBE": true, + "IP_PMTUDISC_WANT": true, + "IP_PORTRANGE": true, + "IP_PORTRANGE_DEFAULT": true, + "IP_PORTRANGE_HIGH": true, + "IP_PORTRANGE_LOW": true, + "IP_RECVDSTADDR": true, + "IP_RECVDSTPORT": true, + "IP_RECVERR": true, + "IP_RECVIF": true, + "IP_RECVOPTS": true, + "IP_RECVORIGDSTADDR": true, + "IP_RECVPKTINFO": true, + "IP_RECVRETOPTS": true, + "IP_RECVRTABLE": true, + "IP_RECVTOS": true, + "IP_RECVTTL": true, + "IP_RETOPTS": true, + "IP_RF": true, + "IP_ROUTER_ALERT": true, + "IP_RSVP_OFF": true, + "IP_RSVP_ON": true, + "IP_RSVP_VIF_OFF": true, + "IP_RSVP_VIF_ON": true, + "IP_RTABLE": true, + "IP_SENDSRCADDR": true, + "IP_STRIPHDR": true, + "IP_TOS": true, + "IP_TRAFFIC_MGT_BACKGROUND": true, + "IP_TRANSPARENT": true, + "IP_TTL": true, + "IP_UNBLOCK_SOURCE": true, + "IP_XFRM_POLICY": true, + "IPv6MTUInfo": true, + "IPv6Mreq": true, + 
"ISIG": true, + "ISTRIP": true, + "IUCLC": true, + "IUTF8": true, + "IXANY": true, + "IXOFF": true, + "IXON": true, + "IfAddrmsg": true, + "IfAnnounceMsghdr": true, + "IfData": true, + "IfInfomsg": true, + "IfMsghdr": true, + "IfaMsghdr": true, + "IfmaMsghdr": true, + "IfmaMsghdr2": true, + "ImplementsGetwd": true, + "Inet4Pktinfo": true, + "Inet6Pktinfo": true, + "InotifyAddWatch": true, + "InotifyEvent": true, + "InotifyInit": true, + "InotifyInit1": true, + "InotifyRmWatch": true, + "InterfaceAddrMessage": true, + "InterfaceAnnounceMessage": true, + "InterfaceInfo": true, + "InterfaceMessage": true, + "InterfaceMulticastAddrMessage": true, + "InvalidHandle": true, + "Ioperm": true, + "Iopl": true, + "Iovec": true, + "IpAdapterInfo": true, + "IpAddrString": true, + "IpAddressString": true, + "IpMaskString": true, + "Issetugid": true, + "KEY_ALL_ACCESS": true, + "KEY_CREATE_LINK": true, + "KEY_CREATE_SUB_KEY": true, + "KEY_ENUMERATE_SUB_KEYS": true, + "KEY_EXECUTE": true, + "KEY_NOTIFY": true, + "KEY_QUERY_VALUE": true, + "KEY_READ": true, + "KEY_SET_VALUE": true, + "KEY_WOW64_32KEY": true, + "KEY_WOW64_64KEY": true, + "KEY_WRITE": true, + "Kevent": true, + "Kevent_t": true, + "Kill": true, + "Klogctl": true, + "Kqueue": true, + "LANG_ENGLISH": true, + "LAYERED_PROTOCOL": true, + "LCNT_OVERLOAD_FLUSH": true, + "LINUX_REBOOT_CMD_CAD_OFF": true, + "LINUX_REBOOT_CMD_CAD_ON": true, + "LINUX_REBOOT_CMD_HALT": true, + "LINUX_REBOOT_CMD_KEXEC": true, + "LINUX_REBOOT_CMD_POWER_OFF": true, + "LINUX_REBOOT_CMD_RESTART": true, + "LINUX_REBOOT_CMD_RESTART2": true, + "LINUX_REBOOT_CMD_SW_SUSPEND": true, + "LINUX_REBOOT_MAGIC1": true, + "LINUX_REBOOT_MAGIC2": true, + "LOCK_EX": true, + "LOCK_NB": true, + "LOCK_SH": true, + "LOCK_UN": true, + "LazyDLL": true, + "LazyProc": true, + "Lchown": true, + "Linger": true, + "Link": true, + "Listen": true, + "Listxattr": true, + "LoadCancelIoEx": true, + "LoadConnectEx": true, + "LoadCreateSymbolicLink": true, + "LoadDLL": true, + "LoadGetAddrInfo": true, + "LoadLibrary": true, + "LoadSetFileCompletionNotificationModes": true, + "LocalFree": true, + "Log2phys_t": true, + "LookupAccountName": true, + "LookupAccountSid": true, + "LookupSID": true, + "LsfJump": true, + "LsfSocket": true, + "LsfStmt": true, + "Lstat": true, + "MADV_AUTOSYNC": true, + "MADV_CAN_REUSE": true, + "MADV_CORE": true, + "MADV_DOFORK": true, + "MADV_DONTFORK": true, + "MADV_DONTNEED": true, + "MADV_FREE": true, + "MADV_FREE_REUSABLE": true, + "MADV_FREE_REUSE": true, + "MADV_HUGEPAGE": true, + "MADV_HWPOISON": true, + "MADV_MERGEABLE": true, + "MADV_NOCORE": true, + "MADV_NOHUGEPAGE": true, + "MADV_NORMAL": true, + "MADV_NOSYNC": true, + "MADV_PROTECT": true, + "MADV_RANDOM": true, + "MADV_REMOVE": true, + "MADV_SEQUENTIAL": true, + "MADV_SPACEAVAIL": true, + "MADV_UNMERGEABLE": true, + "MADV_WILLNEED": true, + "MADV_ZERO_WIRED_PAGES": true, + "MAP_32BIT": true, + "MAP_ALIGNED_SUPER": true, + "MAP_ALIGNMENT_16MB": true, + "MAP_ALIGNMENT_1TB": true, + "MAP_ALIGNMENT_256TB": true, + "MAP_ALIGNMENT_4GB": true, + "MAP_ALIGNMENT_64KB": true, + "MAP_ALIGNMENT_64PB": true, + "MAP_ALIGNMENT_MASK": true, + "MAP_ALIGNMENT_SHIFT": true, + "MAP_ANON": true, + "MAP_ANONYMOUS": true, + "MAP_COPY": true, + "MAP_DENYWRITE": true, + "MAP_EXECUTABLE": true, + "MAP_FILE": true, + "MAP_FIXED": true, + "MAP_FLAGMASK": true, + "MAP_GROWSDOWN": true, + "MAP_HASSEMAPHORE": true, + "MAP_HUGETLB": true, + "MAP_INHERIT": true, + "MAP_INHERIT_COPY": true, + "MAP_INHERIT_DEFAULT": true, + "MAP_INHERIT_DONATE_COPY": 
true, + "MAP_INHERIT_NONE": true, + "MAP_INHERIT_SHARE": true, + "MAP_JIT": true, + "MAP_LOCKED": true, + "MAP_NOCACHE": true, + "MAP_NOCORE": true, + "MAP_NOEXTEND": true, + "MAP_NONBLOCK": true, + "MAP_NORESERVE": true, + "MAP_NOSYNC": true, + "MAP_POPULATE": true, + "MAP_PREFAULT_READ": true, + "MAP_PRIVATE": true, + "MAP_RENAME": true, + "MAP_RESERVED0080": true, + "MAP_RESERVED0100": true, + "MAP_SHARED": true, + "MAP_STACK": true, + "MAP_TRYFIXED": true, + "MAP_TYPE": true, + "MAP_WIRED": true, + "MAXIMUM_REPARSE_DATA_BUFFER_SIZE": true, + "MAXLEN_IFDESCR": true, + "MAXLEN_PHYSADDR": true, + "MAX_ADAPTER_ADDRESS_LENGTH": true, + "MAX_ADAPTER_DESCRIPTION_LENGTH": true, + "MAX_ADAPTER_NAME_LENGTH": true, + "MAX_COMPUTERNAME_LENGTH": true, + "MAX_INTERFACE_NAME_LEN": true, + "MAX_LONG_PATH": true, + "MAX_PATH": true, + "MAX_PROTOCOL_CHAIN": true, + "MCL_CURRENT": true, + "MCL_FUTURE": true, + "MNT_DETACH": true, + "MNT_EXPIRE": true, + "MNT_FORCE": true, + "MSG_BCAST": true, + "MSG_CMSG_CLOEXEC": true, + "MSG_COMPAT": true, + "MSG_CONFIRM": true, + "MSG_CONTROLMBUF": true, + "MSG_CTRUNC": true, + "MSG_DONTROUTE": true, + "MSG_DONTWAIT": true, + "MSG_EOF": true, + "MSG_EOR": true, + "MSG_ERRQUEUE": true, + "MSG_FASTOPEN": true, + "MSG_FIN": true, + "MSG_FLUSH": true, + "MSG_HAVEMORE": true, + "MSG_HOLD": true, + "MSG_IOVUSRSPACE": true, + "MSG_LENUSRSPACE": true, + "MSG_MCAST": true, + "MSG_MORE": true, + "MSG_NAMEMBUF": true, + "MSG_NBIO": true, + "MSG_NEEDSA": true, + "MSG_NOSIGNAL": true, + "MSG_NOTIFICATION": true, + "MSG_OOB": true, + "MSG_PEEK": true, + "MSG_PROXY": true, + "MSG_RCVMORE": true, + "MSG_RST": true, + "MSG_SEND": true, + "MSG_SYN": true, + "MSG_TRUNC": true, + "MSG_TRYHARD": true, + "MSG_USERFLAGS": true, + "MSG_WAITALL": true, + "MSG_WAITFORONE": true, + "MSG_WAITSTREAM": true, + "MS_ACTIVE": true, + "MS_ASYNC": true, + "MS_BIND": true, + "MS_DEACTIVATE": true, + "MS_DIRSYNC": true, + "MS_INVALIDATE": true, + "MS_I_VERSION": true, + "MS_KERNMOUNT": true, + "MS_KILLPAGES": true, + "MS_MANDLOCK": true, + "MS_MGC_MSK": true, + "MS_MGC_VAL": true, + "MS_MOVE": true, + "MS_NOATIME": true, + "MS_NODEV": true, + "MS_NODIRATIME": true, + "MS_NOEXEC": true, + "MS_NOSUID": true, + "MS_NOUSER": true, + "MS_POSIXACL": true, + "MS_PRIVATE": true, + "MS_RDONLY": true, + "MS_REC": true, + "MS_RELATIME": true, + "MS_REMOUNT": true, + "MS_RMT_MASK": true, + "MS_SHARED": true, + "MS_SILENT": true, + "MS_SLAVE": true, + "MS_STRICTATIME": true, + "MS_SYNC": true, + "MS_SYNCHRONOUS": true, + "MS_UNBINDABLE": true, + "Madvise": true, + "MapViewOfFile": true, + "MaxTokenInfoClass": true, + "Mclpool": true, + "MibIfRow": true, + "Mkdir": true, + "Mkdirat": true, + "Mkfifo": true, + "Mknod": true, + "Mknodat": true, + "Mlock": true, + "Mlockall": true, + "Mmap": true, + "Mount": true, + "MoveFile": true, + "Mprotect": true, + "Msghdr": true, + "Munlock": true, + "Munlockall": true, + "Munmap": true, + "MustLoadDLL": true, + "NAME_MAX": true, + "NETLINK_ADD_MEMBERSHIP": true, + "NETLINK_AUDIT": true, + "NETLINK_BROADCAST_ERROR": true, + "NETLINK_CONNECTOR": true, + "NETLINK_DNRTMSG": true, + "NETLINK_DROP_MEMBERSHIP": true, + "NETLINK_ECRYPTFS": true, + "NETLINK_FIB_LOOKUP": true, + "NETLINK_FIREWALL": true, + "NETLINK_GENERIC": true, + "NETLINK_INET_DIAG": true, + "NETLINK_IP6_FW": true, + "NETLINK_ISCSI": true, + "NETLINK_KOBJECT_UEVENT": true, + "NETLINK_NETFILTER": true, + "NETLINK_NFLOG": true, + "NETLINK_NO_ENOBUFS": true, + "NETLINK_PKTINFO": true, + "NETLINK_RDMA": true, + 
"NETLINK_ROUTE": true, + "NETLINK_SCSITRANSPORT": true, + "NETLINK_SELINUX": true, + "NETLINK_UNUSED": true, + "NETLINK_USERSOCK": true, + "NETLINK_XFRM": true, + "NET_RT_DUMP": true, + "NET_RT_DUMP2": true, + "NET_RT_FLAGS": true, + "NET_RT_IFLIST": true, + "NET_RT_IFLIST2": true, + "NET_RT_IFLISTL": true, + "NET_RT_IFMALIST": true, + "NET_RT_MAXID": true, + "NET_RT_OIFLIST": true, + "NET_RT_OOIFLIST": true, + "NET_RT_STAT": true, + "NET_RT_STATS": true, + "NET_RT_TABLE": true, + "NET_RT_TRASH": true, + "NLA_ALIGNTO": true, + "NLA_F_NESTED": true, + "NLA_F_NET_BYTEORDER": true, + "NLA_HDRLEN": true, + "NLMSG_ALIGNTO": true, + "NLMSG_DONE": true, + "NLMSG_ERROR": true, + "NLMSG_HDRLEN": true, + "NLMSG_MIN_TYPE": true, + "NLMSG_NOOP": true, + "NLMSG_OVERRUN": true, + "NLM_F_ACK": true, + "NLM_F_APPEND": true, + "NLM_F_ATOMIC": true, + "NLM_F_CREATE": true, + "NLM_F_DUMP": true, + "NLM_F_ECHO": true, + "NLM_F_EXCL": true, + "NLM_F_MATCH": true, + "NLM_F_MULTI": true, + "NLM_F_REPLACE": true, + "NLM_F_REQUEST": true, + "NLM_F_ROOT": true, + "NOFLSH": true, + "NOTE_ABSOLUTE": true, + "NOTE_ATTRIB": true, + "NOTE_CHILD": true, + "NOTE_DELETE": true, + "NOTE_EOF": true, + "NOTE_EXEC": true, + "NOTE_EXIT": true, + "NOTE_EXITSTATUS": true, + "NOTE_EXTEND": true, + "NOTE_FFAND": true, + "NOTE_FFCOPY": true, + "NOTE_FFCTRLMASK": true, + "NOTE_FFLAGSMASK": true, + "NOTE_FFNOP": true, + "NOTE_FFOR": true, + "NOTE_FORK": true, + "NOTE_LINK": true, + "NOTE_LOWAT": true, + "NOTE_NONE": true, + "NOTE_NSECONDS": true, + "NOTE_PCTRLMASK": true, + "NOTE_PDATAMASK": true, + "NOTE_REAP": true, + "NOTE_RENAME": true, + "NOTE_RESOURCEEND": true, + "NOTE_REVOKE": true, + "NOTE_SECONDS": true, + "NOTE_SIGNAL": true, + "NOTE_TRACK": true, + "NOTE_TRACKERR": true, + "NOTE_TRIGGER": true, + "NOTE_TRUNCATE": true, + "NOTE_USECONDS": true, + "NOTE_VM_ERROR": true, + "NOTE_VM_PRESSURE": true, + "NOTE_VM_PRESSURE_SUDDEN_TERMINATE": true, + "NOTE_VM_PRESSURE_TERMINATE": true, + "NOTE_WRITE": true, + "NameCanonical": true, + "NameCanonicalEx": true, + "NameDisplay": true, + "NameDnsDomain": true, + "NameFullyQualifiedDN": true, + "NameSamCompatible": true, + "NameServicePrincipal": true, + "NameUniqueId": true, + "NameUnknown": true, + "NameUserPrincipal": true, + "Nanosleep": true, + "NetApiBufferFree": true, + "NetGetJoinInformation": true, + "NetSetupDomainName": true, + "NetSetupUnjoined": true, + "NetSetupUnknownStatus": true, + "NetSetupWorkgroupName": true, + "NetUserGetInfo": true, + "NetlinkMessage": true, + "NetlinkRIB": true, + "NetlinkRouteAttr": true, + "NetlinkRouteRequest": true, + "NewCallback": true, + "NewCallbackCDecl": true, + "NewLazyDLL": true, + "NlAttr": true, + "NlMsgerr": true, + "NlMsghdr": true, + "NsecToFiletime": true, + "NsecToTimespec": true, + "NsecToTimeval": true, + "Ntohs": true, + "OCRNL": true, + "OFDEL": true, + "OFILL": true, + "OFIOGETBMAP": true, + "OID_PKIX_KP_SERVER_AUTH": true, + "OID_SERVER_GATED_CRYPTO": true, + "OID_SGC_NETSCAPE": true, + "OLCUC": true, + "ONLCR": true, + "ONLRET": true, + "ONOCR": true, + "ONOEOT": true, + "OPEN_ALWAYS": true, + "OPEN_EXISTING": true, + "OPOST": true, + "O_ACCMODE": true, + "O_ALERT": true, + "O_ALT_IO": true, + "O_APPEND": true, + "O_ASYNC": true, + "O_CLOEXEC": true, + "O_CREAT": true, + "O_DIRECT": true, + "O_DIRECTORY": true, + "O_DSYNC": true, + "O_EVTONLY": true, + "O_EXCL": true, + "O_EXEC": true, + "O_EXLOCK": true, + "O_FSYNC": true, + "O_LARGEFILE": true, + "O_NDELAY": true, + "O_NOATIME": true, + "O_NOCTTY": true, + "O_NOFOLLOW": 
true, + "O_NONBLOCK": true, + "O_NOSIGPIPE": true, + "O_POPUP": true, + "O_RDONLY": true, + "O_RDWR": true, + "O_RSYNC": true, + "O_SHLOCK": true, + "O_SYMLINK": true, + "O_SYNC": true, + "O_TRUNC": true, + "O_TTY_INIT": true, + "O_WRONLY": true, + "Open": true, + "OpenCurrentProcessToken": true, + "OpenProcess": true, + "OpenProcessToken": true, + "Openat": true, + "Overlapped": true, + "PACKET_ADD_MEMBERSHIP": true, + "PACKET_BROADCAST": true, + "PACKET_DROP_MEMBERSHIP": true, + "PACKET_FASTROUTE": true, + "PACKET_HOST": true, + "PACKET_LOOPBACK": true, + "PACKET_MR_ALLMULTI": true, + "PACKET_MR_MULTICAST": true, + "PACKET_MR_PROMISC": true, + "PACKET_MULTICAST": true, + "PACKET_OTHERHOST": true, + "PACKET_OUTGOING": true, + "PACKET_RECV_OUTPUT": true, + "PACKET_RX_RING": true, + "PACKET_STATISTICS": true, + "PAGE_EXECUTE_READ": true, + "PAGE_EXECUTE_READWRITE": true, + "PAGE_EXECUTE_WRITECOPY": true, + "PAGE_READONLY": true, + "PAGE_READWRITE": true, + "PAGE_WRITECOPY": true, + "PARENB": true, + "PARMRK": true, + "PARODD": true, + "PENDIN": true, + "PFL_HIDDEN": true, + "PFL_MATCHES_PROTOCOL_ZERO": true, + "PFL_MULTIPLE_PROTO_ENTRIES": true, + "PFL_NETWORKDIRECT_PROVIDER": true, + "PFL_RECOMMENDED_PROTO_ENTRY": true, + "PF_FLUSH": true, + "PKCS_7_ASN_ENCODING": true, + "PMC5_PIPELINE_FLUSH": true, + "PRIO_PGRP": true, + "PRIO_PROCESS": true, + "PRIO_USER": true, + "PRI_IOFLUSH": true, + "PROCESS_QUERY_INFORMATION": true, + "PROCESS_TERMINATE": true, + "PROT_EXEC": true, + "PROT_GROWSDOWN": true, + "PROT_GROWSUP": true, + "PROT_NONE": true, + "PROT_READ": true, + "PROT_WRITE": true, + "PROV_DH_SCHANNEL": true, + "PROV_DSS": true, + "PROV_DSS_DH": true, + "PROV_EC_ECDSA_FULL": true, + "PROV_EC_ECDSA_SIG": true, + "PROV_EC_ECNRA_FULL": true, + "PROV_EC_ECNRA_SIG": true, + "PROV_FORTEZZA": true, + "PROV_INTEL_SEC": true, + "PROV_MS_EXCHANGE": true, + "PROV_REPLACE_OWF": true, + "PROV_RNG": true, + "PROV_RSA_AES": true, + "PROV_RSA_FULL": true, + "PROV_RSA_SCHANNEL": true, + "PROV_RSA_SIG": true, + "PROV_SPYRUS_LYNKS": true, + "PROV_SSL": true, + "PR_CAPBSET_DROP": true, + "PR_CAPBSET_READ": true, + "PR_CLEAR_SECCOMP_FILTER": true, + "PR_ENDIAN_BIG": true, + "PR_ENDIAN_LITTLE": true, + "PR_ENDIAN_PPC_LITTLE": true, + "PR_FPEMU_NOPRINT": true, + "PR_FPEMU_SIGFPE": true, + "PR_FP_EXC_ASYNC": true, + "PR_FP_EXC_DISABLED": true, + "PR_FP_EXC_DIV": true, + "PR_FP_EXC_INV": true, + "PR_FP_EXC_NONRECOV": true, + "PR_FP_EXC_OVF": true, + "PR_FP_EXC_PRECISE": true, + "PR_FP_EXC_RES": true, + "PR_FP_EXC_SW_ENABLE": true, + "PR_FP_EXC_UND": true, + "PR_GET_DUMPABLE": true, + "PR_GET_ENDIAN": true, + "PR_GET_FPEMU": true, + "PR_GET_FPEXC": true, + "PR_GET_KEEPCAPS": true, + "PR_GET_NAME": true, + "PR_GET_PDEATHSIG": true, + "PR_GET_SECCOMP": true, + "PR_GET_SECCOMP_FILTER": true, + "PR_GET_SECUREBITS": true, + "PR_GET_TIMERSLACK": true, + "PR_GET_TIMING": true, + "PR_GET_TSC": true, + "PR_GET_UNALIGN": true, + "PR_MCE_KILL": true, + "PR_MCE_KILL_CLEAR": true, + "PR_MCE_KILL_DEFAULT": true, + "PR_MCE_KILL_EARLY": true, + "PR_MCE_KILL_GET": true, + "PR_MCE_KILL_LATE": true, + "PR_MCE_KILL_SET": true, + "PR_SECCOMP_FILTER_EVENT": true, + "PR_SECCOMP_FILTER_SYSCALL": true, + "PR_SET_DUMPABLE": true, + "PR_SET_ENDIAN": true, + "PR_SET_FPEMU": true, + "PR_SET_FPEXC": true, + "PR_SET_KEEPCAPS": true, + "PR_SET_NAME": true, + "PR_SET_PDEATHSIG": true, + "PR_SET_PTRACER": true, + "PR_SET_SECCOMP": true, + "PR_SET_SECCOMP_FILTER": true, + "PR_SET_SECUREBITS": true, + "PR_SET_TIMERSLACK": true, + "PR_SET_TIMING": 
true, + "PR_SET_TSC": true, + "PR_SET_UNALIGN": true, + "PR_TASK_PERF_EVENTS_DISABLE": true, + "PR_TASK_PERF_EVENTS_ENABLE": true, + "PR_TIMING_STATISTICAL": true, + "PR_TIMING_TIMESTAMP": true, + "PR_TSC_ENABLE": true, + "PR_TSC_SIGSEGV": true, + "PR_UNALIGN_NOPRINT": true, + "PR_UNALIGN_SIGBUS": true, + "PTRACE_ARCH_PRCTL": true, + "PTRACE_ATTACH": true, + "PTRACE_CONT": true, + "PTRACE_DETACH": true, + "PTRACE_EVENT_CLONE": true, + "PTRACE_EVENT_EXEC": true, + "PTRACE_EVENT_EXIT": true, + "PTRACE_EVENT_FORK": true, + "PTRACE_EVENT_VFORK": true, + "PTRACE_EVENT_VFORK_DONE": true, + "PTRACE_GETCRUNCHREGS": true, + "PTRACE_GETEVENTMSG": true, + "PTRACE_GETFPREGS": true, + "PTRACE_GETFPXREGS": true, + "PTRACE_GETHBPREGS": true, + "PTRACE_GETREGS": true, + "PTRACE_GETREGSET": true, + "PTRACE_GETSIGINFO": true, + "PTRACE_GETVFPREGS": true, + "PTRACE_GETWMMXREGS": true, + "PTRACE_GET_THREAD_AREA": true, + "PTRACE_KILL": true, + "PTRACE_OLDSETOPTIONS": true, + "PTRACE_O_MASK": true, + "PTRACE_O_TRACECLONE": true, + "PTRACE_O_TRACEEXEC": true, + "PTRACE_O_TRACEEXIT": true, + "PTRACE_O_TRACEFORK": true, + "PTRACE_O_TRACESYSGOOD": true, + "PTRACE_O_TRACEVFORK": true, + "PTRACE_O_TRACEVFORKDONE": true, + "PTRACE_PEEKDATA": true, + "PTRACE_PEEKTEXT": true, + "PTRACE_PEEKUSR": true, + "PTRACE_POKEDATA": true, + "PTRACE_POKETEXT": true, + "PTRACE_POKEUSR": true, + "PTRACE_SETCRUNCHREGS": true, + "PTRACE_SETFPREGS": true, + "PTRACE_SETFPXREGS": true, + "PTRACE_SETHBPREGS": true, + "PTRACE_SETOPTIONS": true, + "PTRACE_SETREGS": true, + "PTRACE_SETREGSET": true, + "PTRACE_SETSIGINFO": true, + "PTRACE_SETVFPREGS": true, + "PTRACE_SETWMMXREGS": true, + "PTRACE_SET_SYSCALL": true, + "PTRACE_SET_THREAD_AREA": true, + "PTRACE_SINGLEBLOCK": true, + "PTRACE_SINGLESTEP": true, + "PTRACE_SYSCALL": true, + "PTRACE_SYSEMU": true, + "PTRACE_SYSEMU_SINGLESTEP": true, + "PTRACE_TRACEME": true, + "PT_ATTACH": true, + "PT_ATTACHEXC": true, + "PT_CONTINUE": true, + "PT_DATA_ADDR": true, + "PT_DENY_ATTACH": true, + "PT_DETACH": true, + "PT_FIRSTMACH": true, + "PT_FORCEQUOTA": true, + "PT_KILL": true, + "PT_MASK": true, + "PT_READ_D": true, + "PT_READ_I": true, + "PT_READ_U": true, + "PT_SIGEXC": true, + "PT_STEP": true, + "PT_TEXT_ADDR": true, + "PT_TEXT_END_ADDR": true, + "PT_THUPDATE": true, + "PT_TRACE_ME": true, + "PT_WRITE_D": true, + "PT_WRITE_I": true, + "PT_WRITE_U": true, + "ParseDirent": true, + "ParseNetlinkMessage": true, + "ParseNetlinkRouteAttr": true, + "ParseRoutingMessage": true, + "ParseRoutingSockaddr": true, + "ParseSocketControlMessage": true, + "ParseUnixCredentials": true, + "ParseUnixRights": true, + "PathMax": true, + "Pathconf": true, + "Pause": true, + "Pipe": true, + "Pipe2": true, + "PivotRoot": true, + "Pointer": true, + "PostQueuedCompletionStatus": true, + "Pread": true, + "Proc": true, + "ProcAttr": true, + "Process32First": true, + "Process32Next": true, + "ProcessEntry32": true, + "ProcessInformation": true, + "Protoent": true, + "PtraceAttach": true, + "PtraceCont": true, + "PtraceDetach": true, + "PtraceGetEventMsg": true, + "PtraceGetRegs": true, + "PtracePeekData": true, + "PtracePeekText": true, + "PtracePokeData": true, + "PtracePokeText": true, + "PtraceRegs": true, + "PtraceSetOptions": true, + "PtraceSetRegs": true, + "PtraceSingleStep": true, + "PtraceSyscall": true, + "Pwrite": true, + "REG_BINARY": true, + "REG_DWORD": true, + "REG_DWORD_BIG_ENDIAN": true, + "REG_DWORD_LITTLE_ENDIAN": true, + "REG_EXPAND_SZ": true, + "REG_FULL_RESOURCE_DESCRIPTOR": true, + "REG_LINK": true, + 
"REG_MULTI_SZ": true, + "REG_NONE": true, + "REG_QWORD": true, + "REG_QWORD_LITTLE_ENDIAN": true, + "REG_RESOURCE_LIST": true, + "REG_RESOURCE_REQUIREMENTS_LIST": true, + "REG_SZ": true, + "RLIMIT_AS": true, + "RLIMIT_CORE": true, + "RLIMIT_CPU": true, + "RLIMIT_DATA": true, + "RLIMIT_FSIZE": true, + "RLIMIT_NOFILE": true, + "RLIMIT_STACK": true, + "RLIM_INFINITY": true, + "RTAX_ADVMSS": true, + "RTAX_AUTHOR": true, + "RTAX_BRD": true, + "RTAX_CWND": true, + "RTAX_DST": true, + "RTAX_FEATURES": true, + "RTAX_FEATURE_ALLFRAG": true, + "RTAX_FEATURE_ECN": true, + "RTAX_FEATURE_SACK": true, + "RTAX_FEATURE_TIMESTAMP": true, + "RTAX_GATEWAY": true, + "RTAX_GENMASK": true, + "RTAX_HOPLIMIT": true, + "RTAX_IFA": true, + "RTAX_IFP": true, + "RTAX_INITCWND": true, + "RTAX_INITRWND": true, + "RTAX_LABEL": true, + "RTAX_LOCK": true, + "RTAX_MAX": true, + "RTAX_MTU": true, + "RTAX_NETMASK": true, + "RTAX_REORDERING": true, + "RTAX_RTO_MIN": true, + "RTAX_RTT": true, + "RTAX_RTTVAR": true, + "RTAX_SRC": true, + "RTAX_SRCMASK": true, + "RTAX_SSTHRESH": true, + "RTAX_TAG": true, + "RTAX_UNSPEC": true, + "RTAX_WINDOW": true, + "RTA_ALIGNTO": true, + "RTA_AUTHOR": true, + "RTA_BRD": true, + "RTA_CACHEINFO": true, + "RTA_DST": true, + "RTA_FLOW": true, + "RTA_GATEWAY": true, + "RTA_GENMASK": true, + "RTA_IFA": true, + "RTA_IFP": true, + "RTA_IIF": true, + "RTA_LABEL": true, + "RTA_MAX": true, + "RTA_METRICS": true, + "RTA_MULTIPATH": true, + "RTA_NETMASK": true, + "RTA_OIF": true, + "RTA_PREFSRC": true, + "RTA_PRIORITY": true, + "RTA_SRC": true, + "RTA_SRCMASK": true, + "RTA_TABLE": true, + "RTA_TAG": true, + "RTA_UNSPEC": true, + "RTCF_DIRECTSRC": true, + "RTCF_DOREDIRECT": true, + "RTCF_LOG": true, + "RTCF_MASQ": true, + "RTCF_NAT": true, + "RTCF_VALVE": true, + "RTF_ADDRCLASSMASK": true, + "RTF_ADDRCONF": true, + "RTF_ALLONLINK": true, + "RTF_ANNOUNCE": true, + "RTF_BLACKHOLE": true, + "RTF_BROADCAST": true, + "RTF_CACHE": true, + "RTF_CLONED": true, + "RTF_CLONING": true, + "RTF_CONDEMNED": true, + "RTF_DEFAULT": true, + "RTF_DELCLONE": true, + "RTF_DONE": true, + "RTF_DYNAMIC": true, + "RTF_FLOW": true, + "RTF_FMASK": true, + "RTF_GATEWAY": true, + "RTF_GWFLAG_COMPAT": true, + "RTF_HOST": true, + "RTF_IFREF": true, + "RTF_IFSCOPE": true, + "RTF_INTERFACE": true, + "RTF_IRTT": true, + "RTF_LINKRT": true, + "RTF_LLDATA": true, + "RTF_LLINFO": true, + "RTF_LOCAL": true, + "RTF_MASK": true, + "RTF_MODIFIED": true, + "RTF_MPATH": true, + "RTF_MPLS": true, + "RTF_MSS": true, + "RTF_MTU": true, + "RTF_MULTICAST": true, + "RTF_NAT": true, + "RTF_NOFORWARD": true, + "RTF_NONEXTHOP": true, + "RTF_NOPMTUDISC": true, + "RTF_PERMANENT_ARP": true, + "RTF_PINNED": true, + "RTF_POLICY": true, + "RTF_PRCLONING": true, + "RTF_PROTO1": true, + "RTF_PROTO2": true, + "RTF_PROTO3": true, + "RTF_REINSTATE": true, + "RTF_REJECT": true, + "RTF_RNH_LOCKED": true, + "RTF_SOURCE": true, + "RTF_SRC": true, + "RTF_STATIC": true, + "RTF_STICKY": true, + "RTF_THROW": true, + "RTF_TUNNEL": true, + "RTF_UP": true, + "RTF_USETRAILERS": true, + "RTF_WASCLONED": true, + "RTF_WINDOW": true, + "RTF_XRESOLVE": true, + "RTM_ADD": true, + "RTM_BASE": true, + "RTM_CHANGE": true, + "RTM_CHGADDR": true, + "RTM_DELACTION": true, + "RTM_DELADDR": true, + "RTM_DELADDRLABEL": true, + "RTM_DELETE": true, + "RTM_DELLINK": true, + "RTM_DELMADDR": true, + "RTM_DELNEIGH": true, + "RTM_DELQDISC": true, + "RTM_DELROUTE": true, + "RTM_DELRULE": true, + "RTM_DELTCLASS": true, + "RTM_DELTFILTER": true, + "RTM_DESYNC": true, + "RTM_F_CLONED": true, + 
"RTM_F_EQUALIZE": true, + "RTM_F_NOTIFY": true, + "RTM_F_PREFIX": true, + "RTM_GET": true, + "RTM_GET2": true, + "RTM_GETACTION": true, + "RTM_GETADDR": true, + "RTM_GETADDRLABEL": true, + "RTM_GETANYCAST": true, + "RTM_GETDCB": true, + "RTM_GETLINK": true, + "RTM_GETMULTICAST": true, + "RTM_GETNEIGH": true, + "RTM_GETNEIGHTBL": true, + "RTM_GETQDISC": true, + "RTM_GETROUTE": true, + "RTM_GETRULE": true, + "RTM_GETTCLASS": true, + "RTM_GETTFILTER": true, + "RTM_IEEE80211": true, + "RTM_IFANNOUNCE": true, + "RTM_IFINFO": true, + "RTM_IFINFO2": true, + "RTM_LLINFO_UPD": true, + "RTM_LOCK": true, + "RTM_LOSING": true, + "RTM_MAX": true, + "RTM_MAXSIZE": true, + "RTM_MISS": true, + "RTM_NEWACTION": true, + "RTM_NEWADDR": true, + "RTM_NEWADDRLABEL": true, + "RTM_NEWLINK": true, + "RTM_NEWMADDR": true, + "RTM_NEWMADDR2": true, + "RTM_NEWNDUSEROPT": true, + "RTM_NEWNEIGH": true, + "RTM_NEWNEIGHTBL": true, + "RTM_NEWPREFIX": true, + "RTM_NEWQDISC": true, + "RTM_NEWROUTE": true, + "RTM_NEWRULE": true, + "RTM_NEWTCLASS": true, + "RTM_NEWTFILTER": true, + "RTM_NR_FAMILIES": true, + "RTM_NR_MSGTYPES": true, + "RTM_OIFINFO": true, + "RTM_OLDADD": true, + "RTM_OLDDEL": true, + "RTM_OOIFINFO": true, + "RTM_REDIRECT": true, + "RTM_RESOLVE": true, + "RTM_RTTUNIT": true, + "RTM_SETDCB": true, + "RTM_SETGATE": true, + "RTM_SETLINK": true, + "RTM_SETNEIGHTBL": true, + "RTM_VERSION": true, + "RTNH_ALIGNTO": true, + "RTNH_F_DEAD": true, + "RTNH_F_ONLINK": true, + "RTNH_F_PERVASIVE": true, + "RTNLGRP_IPV4_IFADDR": true, + "RTNLGRP_IPV4_MROUTE": true, + "RTNLGRP_IPV4_ROUTE": true, + "RTNLGRP_IPV4_RULE": true, + "RTNLGRP_IPV6_IFADDR": true, + "RTNLGRP_IPV6_IFINFO": true, + "RTNLGRP_IPV6_MROUTE": true, + "RTNLGRP_IPV6_PREFIX": true, + "RTNLGRP_IPV6_ROUTE": true, + "RTNLGRP_IPV6_RULE": true, + "RTNLGRP_LINK": true, + "RTNLGRP_ND_USEROPT": true, + "RTNLGRP_NEIGH": true, + "RTNLGRP_NONE": true, + "RTNLGRP_NOTIFY": true, + "RTNLGRP_TC": true, + "RTN_ANYCAST": true, + "RTN_BLACKHOLE": true, + "RTN_BROADCAST": true, + "RTN_LOCAL": true, + "RTN_MAX": true, + "RTN_MULTICAST": true, + "RTN_NAT": true, + "RTN_PROHIBIT": true, + "RTN_THROW": true, + "RTN_UNICAST": true, + "RTN_UNREACHABLE": true, + "RTN_UNSPEC": true, + "RTN_XRESOLVE": true, + "RTPROT_BIRD": true, + "RTPROT_BOOT": true, + "RTPROT_DHCP": true, + "RTPROT_DNROUTED": true, + "RTPROT_GATED": true, + "RTPROT_KERNEL": true, + "RTPROT_MRT": true, + "RTPROT_NTK": true, + "RTPROT_RA": true, + "RTPROT_REDIRECT": true, + "RTPROT_STATIC": true, + "RTPROT_UNSPEC": true, + "RTPROT_XORP": true, + "RTPROT_ZEBRA": true, + "RTV_EXPIRE": true, + "RTV_HOPCOUNT": true, + "RTV_MTU": true, + "RTV_RPIPE": true, + "RTV_RTT": true, + "RTV_RTTVAR": true, + "RTV_SPIPE": true, + "RTV_SSTHRESH": true, + "RTV_WEIGHT": true, + "RT_CACHING_CONTEXT": true, + "RT_CLASS_DEFAULT": true, + "RT_CLASS_LOCAL": true, + "RT_CLASS_MAIN": true, + "RT_CLASS_MAX": true, + "RT_CLASS_UNSPEC": true, + "RT_DEFAULT_FIB": true, + "RT_NORTREF": true, + "RT_SCOPE_HOST": true, + "RT_SCOPE_LINK": true, + "RT_SCOPE_NOWHERE": true, + "RT_SCOPE_SITE": true, + "RT_SCOPE_UNIVERSE": true, + "RT_TABLEID_MAX": true, + "RT_TABLE_COMPAT": true, + "RT_TABLE_DEFAULT": true, + "RT_TABLE_LOCAL": true, + "RT_TABLE_MAIN": true, + "RT_TABLE_MAX": true, + "RT_TABLE_UNSPEC": true, + "RUSAGE_CHILDREN": true, + "RUSAGE_SELF": true, + "RUSAGE_THREAD": true, + "Radvisory_t": true, + "RawConn": true, + "RawSockaddr": true, + "RawSockaddrAny": true, + "RawSockaddrDatalink": true, + "RawSockaddrInet4": true, + "RawSockaddrInet6": true, + 
"RawSockaddrLinklayer": true, + "RawSockaddrNetlink": true, + "RawSockaddrUnix": true, + "RawSyscall": true, + "RawSyscall6": true, + "Read": true, + "ReadConsole": true, + "ReadDirectoryChanges": true, + "ReadDirent": true, + "ReadFile": true, + "Readlink": true, + "Reboot": true, + "Recvfrom": true, + "Recvmsg": true, + "RegCloseKey": true, + "RegEnumKeyEx": true, + "RegOpenKeyEx": true, + "RegQueryInfoKey": true, + "RegQueryValueEx": true, + "RemoveDirectory": true, + "Removexattr": true, + "Rename": true, + "Renameat": true, + "Revoke": true, + "Rlimit": true, + "Rmdir": true, + "RouteMessage": true, + "RouteRIB": true, + "RoutingMessage": true, + "RtAttr": true, + "RtGenmsg": true, + "RtMetrics": true, + "RtMsg": true, + "RtMsghdr": true, + "RtNexthop": true, + "Rusage": true, + "SCM_BINTIME": true, + "SCM_CREDENTIALS": true, + "SCM_CREDS": true, + "SCM_RIGHTS": true, + "SCM_TIMESTAMP": true, + "SCM_TIMESTAMPING": true, + "SCM_TIMESTAMPNS": true, + "SCM_TIMESTAMP_MONOTONIC": true, + "SHUT_RD": true, + "SHUT_RDWR": true, + "SHUT_WR": true, + "SID": true, + "SIDAndAttributes": true, + "SIGABRT": true, + "SIGALRM": true, + "SIGBUS": true, + "SIGCHLD": true, + "SIGCLD": true, + "SIGCONT": true, + "SIGEMT": true, + "SIGFPE": true, + "SIGHUP": true, + "SIGILL": true, + "SIGINFO": true, + "SIGINT": true, + "SIGIO": true, + "SIGIOT": true, + "SIGKILL": true, + "SIGLIBRT": true, + "SIGLWP": true, + "SIGPIPE": true, + "SIGPOLL": true, + "SIGPROF": true, + "SIGPWR": true, + "SIGQUIT": true, + "SIGSEGV": true, + "SIGSTKFLT": true, + "SIGSTOP": true, + "SIGSYS": true, + "SIGTERM": true, + "SIGTHR": true, + "SIGTRAP": true, + "SIGTSTP": true, + "SIGTTIN": true, + "SIGTTOU": true, + "SIGUNUSED": true, + "SIGURG": true, + "SIGUSR1": true, + "SIGUSR2": true, + "SIGVTALRM": true, + "SIGWINCH": true, + "SIGXCPU": true, + "SIGXFSZ": true, + "SIOCADDDLCI": true, + "SIOCADDMULTI": true, + "SIOCADDRT": true, + "SIOCAIFADDR": true, + "SIOCAIFGROUP": true, + "SIOCALIFADDR": true, + "SIOCARPIPLL": true, + "SIOCATMARK": true, + "SIOCAUTOADDR": true, + "SIOCAUTONETMASK": true, + "SIOCBRDGADD": true, + "SIOCBRDGADDS": true, + "SIOCBRDGARL": true, + "SIOCBRDGDADDR": true, + "SIOCBRDGDEL": true, + "SIOCBRDGDELS": true, + "SIOCBRDGFLUSH": true, + "SIOCBRDGFRL": true, + "SIOCBRDGGCACHE": true, + "SIOCBRDGGFD": true, + "SIOCBRDGGHT": true, + "SIOCBRDGGIFFLGS": true, + "SIOCBRDGGMA": true, + "SIOCBRDGGPARAM": true, + "SIOCBRDGGPRI": true, + "SIOCBRDGGRL": true, + "SIOCBRDGGSIFS": true, + "SIOCBRDGGTO": true, + "SIOCBRDGIFS": true, + "SIOCBRDGRTS": true, + "SIOCBRDGSADDR": true, + "SIOCBRDGSCACHE": true, + "SIOCBRDGSFD": true, + "SIOCBRDGSHT": true, + "SIOCBRDGSIFCOST": true, + "SIOCBRDGSIFFLGS": true, + "SIOCBRDGSIFPRIO": true, + "SIOCBRDGSMA": true, + "SIOCBRDGSPRI": true, + "SIOCBRDGSPROTO": true, + "SIOCBRDGSTO": true, + "SIOCBRDGSTXHC": true, + "SIOCDARP": true, + "SIOCDELDLCI": true, + "SIOCDELMULTI": true, + "SIOCDELRT": true, + "SIOCDEVPRIVATE": true, + "SIOCDIFADDR": true, + "SIOCDIFGROUP": true, + "SIOCDIFPHYADDR": true, + "SIOCDLIFADDR": true, + "SIOCDRARP": true, + "SIOCGARP": true, + "SIOCGDRVSPEC": true, + "SIOCGETKALIVE": true, + "SIOCGETLABEL": true, + "SIOCGETPFLOW": true, + "SIOCGETPFSYNC": true, + "SIOCGETSGCNT": true, + "SIOCGETVIFCNT": true, + "SIOCGETVLAN": true, + "SIOCGHIWAT": true, + "SIOCGIFADDR": true, + "SIOCGIFADDRPREF": true, + "SIOCGIFALIAS": true, + "SIOCGIFALTMTU": true, + "SIOCGIFASYNCMAP": true, + "SIOCGIFBOND": true, + "SIOCGIFBR": true, + "SIOCGIFBRDADDR": true, + "SIOCGIFCAP": true, 
+ "SIOCGIFCONF": true, + "SIOCGIFCOUNT": true, + "SIOCGIFDATA": true, + "SIOCGIFDESCR": true, + "SIOCGIFDEVMTU": true, + "SIOCGIFDLT": true, + "SIOCGIFDSTADDR": true, + "SIOCGIFENCAP": true, + "SIOCGIFFIB": true, + "SIOCGIFFLAGS": true, + "SIOCGIFGATTR": true, + "SIOCGIFGENERIC": true, + "SIOCGIFGMEMB": true, + "SIOCGIFGROUP": true, + "SIOCGIFHARDMTU": true, + "SIOCGIFHWADDR": true, + "SIOCGIFINDEX": true, + "SIOCGIFKPI": true, + "SIOCGIFMAC": true, + "SIOCGIFMAP": true, + "SIOCGIFMEDIA": true, + "SIOCGIFMEM": true, + "SIOCGIFMETRIC": true, + "SIOCGIFMTU": true, + "SIOCGIFNAME": true, + "SIOCGIFNETMASK": true, + "SIOCGIFPDSTADDR": true, + "SIOCGIFPFLAGS": true, + "SIOCGIFPHYS": true, + "SIOCGIFPRIORITY": true, + "SIOCGIFPSRCADDR": true, + "SIOCGIFRDOMAIN": true, + "SIOCGIFRTLABEL": true, + "SIOCGIFSLAVE": true, + "SIOCGIFSTATUS": true, + "SIOCGIFTIMESLOT": true, + "SIOCGIFTXQLEN": true, + "SIOCGIFVLAN": true, + "SIOCGIFWAKEFLAGS": true, + "SIOCGIFXFLAGS": true, + "SIOCGLIFADDR": true, + "SIOCGLIFPHYADDR": true, + "SIOCGLIFPHYRTABLE": true, + "SIOCGLIFPHYTTL": true, + "SIOCGLINKSTR": true, + "SIOCGLOWAT": true, + "SIOCGPGRP": true, + "SIOCGPRIVATE_0": true, + "SIOCGPRIVATE_1": true, + "SIOCGRARP": true, + "SIOCGSPPPPARAMS": true, + "SIOCGSTAMP": true, + "SIOCGSTAMPNS": true, + "SIOCGVH": true, + "SIOCGVNETID": true, + "SIOCIFCREATE": true, + "SIOCIFCREATE2": true, + "SIOCIFDESTROY": true, + "SIOCIFGCLONERS": true, + "SIOCINITIFADDR": true, + "SIOCPROTOPRIVATE": true, + "SIOCRSLVMULTI": true, + "SIOCRTMSG": true, + "SIOCSARP": true, + "SIOCSDRVSPEC": true, + "SIOCSETKALIVE": true, + "SIOCSETLABEL": true, + "SIOCSETPFLOW": true, + "SIOCSETPFSYNC": true, + "SIOCSETVLAN": true, + "SIOCSHIWAT": true, + "SIOCSIFADDR": true, + "SIOCSIFADDRPREF": true, + "SIOCSIFALTMTU": true, + "SIOCSIFASYNCMAP": true, + "SIOCSIFBOND": true, + "SIOCSIFBR": true, + "SIOCSIFBRDADDR": true, + "SIOCSIFCAP": true, + "SIOCSIFDESCR": true, + "SIOCSIFDSTADDR": true, + "SIOCSIFENCAP": true, + "SIOCSIFFIB": true, + "SIOCSIFFLAGS": true, + "SIOCSIFGATTR": true, + "SIOCSIFGENERIC": true, + "SIOCSIFHWADDR": true, + "SIOCSIFHWBROADCAST": true, + "SIOCSIFKPI": true, + "SIOCSIFLINK": true, + "SIOCSIFLLADDR": true, + "SIOCSIFMAC": true, + "SIOCSIFMAP": true, + "SIOCSIFMEDIA": true, + "SIOCSIFMEM": true, + "SIOCSIFMETRIC": true, + "SIOCSIFMTU": true, + "SIOCSIFNAME": true, + "SIOCSIFNETMASK": true, + "SIOCSIFPFLAGS": true, + "SIOCSIFPHYADDR": true, + "SIOCSIFPHYS": true, + "SIOCSIFPRIORITY": true, + "SIOCSIFRDOMAIN": true, + "SIOCSIFRTLABEL": true, + "SIOCSIFRVNET": true, + "SIOCSIFSLAVE": true, + "SIOCSIFTIMESLOT": true, + "SIOCSIFTXQLEN": true, + "SIOCSIFVLAN": true, + "SIOCSIFVNET": true, + "SIOCSIFXFLAGS": true, + "SIOCSLIFPHYADDR": true, + "SIOCSLIFPHYRTABLE": true, + "SIOCSLIFPHYTTL": true, + "SIOCSLINKSTR": true, + "SIOCSLOWAT": true, + "SIOCSPGRP": true, + "SIOCSRARP": true, + "SIOCSSPPPPARAMS": true, + "SIOCSVH": true, + "SIOCSVNETID": true, + "SIOCZIFDATA": true, + "SIO_GET_EXTENSION_FUNCTION_POINTER": true, + "SIO_GET_INTERFACE_LIST": true, + "SIO_KEEPALIVE_VALS": true, + "SIO_UDP_CONNRESET": true, + "SOCK_CLOEXEC": true, + "SOCK_DCCP": true, + "SOCK_DGRAM": true, + "SOCK_FLAGS_MASK": true, + "SOCK_MAXADDRLEN": true, + "SOCK_NONBLOCK": true, + "SOCK_NOSIGPIPE": true, + "SOCK_PACKET": true, + "SOCK_RAW": true, + "SOCK_RDM": true, + "SOCK_SEQPACKET": true, + "SOCK_STREAM": true, + "SOL_AAL": true, + "SOL_ATM": true, + "SOL_DECNET": true, + "SOL_ICMPV6": true, + "SOL_IP": true, + "SOL_IPV6": true, + "SOL_IRDA": true, + 
"SOL_PACKET": true, + "SOL_RAW": true, + "SOL_SOCKET": true, + "SOL_TCP": true, + "SOL_X25": true, + "SOMAXCONN": true, + "SO_ACCEPTCONN": true, + "SO_ACCEPTFILTER": true, + "SO_ATTACH_FILTER": true, + "SO_BINDANY": true, + "SO_BINDTODEVICE": true, + "SO_BINTIME": true, + "SO_BROADCAST": true, + "SO_BSDCOMPAT": true, + "SO_DEBUG": true, + "SO_DETACH_FILTER": true, + "SO_DOMAIN": true, + "SO_DONTROUTE": true, + "SO_DONTTRUNC": true, + "SO_ERROR": true, + "SO_KEEPALIVE": true, + "SO_LABEL": true, + "SO_LINGER": true, + "SO_LINGER_SEC": true, + "SO_LISTENINCQLEN": true, + "SO_LISTENQLEN": true, + "SO_LISTENQLIMIT": true, + "SO_MARK": true, + "SO_NETPROC": true, + "SO_NKE": true, + "SO_NOADDRERR": true, + "SO_NOHEADER": true, + "SO_NOSIGPIPE": true, + "SO_NOTIFYCONFLICT": true, + "SO_NO_CHECK": true, + "SO_NO_DDP": true, + "SO_NO_OFFLOAD": true, + "SO_NP_EXTENSIONS": true, + "SO_NREAD": true, + "SO_NWRITE": true, + "SO_OOBINLINE": true, + "SO_OVERFLOWED": true, + "SO_PASSCRED": true, + "SO_PASSSEC": true, + "SO_PEERCRED": true, + "SO_PEERLABEL": true, + "SO_PEERNAME": true, + "SO_PEERSEC": true, + "SO_PRIORITY": true, + "SO_PROTOCOL": true, + "SO_PROTOTYPE": true, + "SO_RANDOMPORT": true, + "SO_RCVBUF": true, + "SO_RCVBUFFORCE": true, + "SO_RCVLOWAT": true, + "SO_RCVTIMEO": true, + "SO_RESTRICTIONS": true, + "SO_RESTRICT_DENYIN": true, + "SO_RESTRICT_DENYOUT": true, + "SO_RESTRICT_DENYSET": true, + "SO_REUSEADDR": true, + "SO_REUSEPORT": true, + "SO_REUSESHAREUID": true, + "SO_RTABLE": true, + "SO_RXQ_OVFL": true, + "SO_SECURITY_AUTHENTICATION": true, + "SO_SECURITY_ENCRYPTION_NETWORK": true, + "SO_SECURITY_ENCRYPTION_TRANSPORT": true, + "SO_SETFIB": true, + "SO_SNDBUF": true, + "SO_SNDBUFFORCE": true, + "SO_SNDLOWAT": true, + "SO_SNDTIMEO": true, + "SO_SPLICE": true, + "SO_TIMESTAMP": true, + "SO_TIMESTAMPING": true, + "SO_TIMESTAMPNS": true, + "SO_TIMESTAMP_MONOTONIC": true, + "SO_TYPE": true, + "SO_UPCALLCLOSEWAIT": true, + "SO_UPDATE_ACCEPT_CONTEXT": true, + "SO_UPDATE_CONNECT_CONTEXT": true, + "SO_USELOOPBACK": true, + "SO_USER_COOKIE": true, + "SO_VENDOR": true, + "SO_WANTMORE": true, + "SO_WANTOOBFLAG": true, + "SSLExtraCertChainPolicyPara": true, + "STANDARD_RIGHTS_ALL": true, + "STANDARD_RIGHTS_EXECUTE": true, + "STANDARD_RIGHTS_READ": true, + "STANDARD_RIGHTS_REQUIRED": true, + "STANDARD_RIGHTS_WRITE": true, + "STARTF_USESHOWWINDOW": true, + "STARTF_USESTDHANDLES": true, + "STD_ERROR_HANDLE": true, + "STD_INPUT_HANDLE": true, + "STD_OUTPUT_HANDLE": true, + "SUBLANG_ENGLISH_US": true, + "SW_FORCEMINIMIZE": true, + "SW_HIDE": true, + "SW_MAXIMIZE": true, + "SW_MINIMIZE": true, + "SW_NORMAL": true, + "SW_RESTORE": true, + "SW_SHOW": true, + "SW_SHOWDEFAULT": true, + "SW_SHOWMAXIMIZED": true, + "SW_SHOWMINIMIZED": true, + "SW_SHOWMINNOACTIVE": true, + "SW_SHOWNA": true, + "SW_SHOWNOACTIVATE": true, + "SW_SHOWNORMAL": true, + "SYMBOLIC_LINK_FLAG_DIRECTORY": true, + "SYNCHRONIZE": true, + "SYSCTL_VERSION": true, + "SYSCTL_VERS_0": true, + "SYSCTL_VERS_1": true, + "SYSCTL_VERS_MASK": true, + "SYS_ABORT2": true, + "SYS_ACCEPT": true, + "SYS_ACCEPT4": true, + "SYS_ACCEPT_NOCANCEL": true, + "SYS_ACCESS": true, + "SYS_ACCESS_EXTENDED": true, + "SYS_ACCT": true, + "SYS_ADD_KEY": true, + "SYS_ADD_PROFIL": true, + "SYS_ADJFREQ": true, + "SYS_ADJTIME": true, + "SYS_ADJTIMEX": true, + "SYS_AFS_SYSCALL": true, + "SYS_AIO_CANCEL": true, + "SYS_AIO_ERROR": true, + "SYS_AIO_FSYNC": true, + "SYS_AIO_READ": true, + "SYS_AIO_RETURN": true, + "SYS_AIO_SUSPEND": true, + "SYS_AIO_SUSPEND_NOCANCEL": true, + 
"SYS_AIO_WRITE": true, + "SYS_ALARM": true, + "SYS_ARCH_PRCTL": true, + "SYS_ARM_FADVISE64_64": true, + "SYS_ARM_SYNC_FILE_RANGE": true, + "SYS_ATGETMSG": true, + "SYS_ATPGETREQ": true, + "SYS_ATPGETRSP": true, + "SYS_ATPSNDREQ": true, + "SYS_ATPSNDRSP": true, + "SYS_ATPUTMSG": true, + "SYS_ATSOCKET": true, + "SYS_AUDIT": true, + "SYS_AUDITCTL": true, + "SYS_AUDITON": true, + "SYS_AUDIT_SESSION_JOIN": true, + "SYS_AUDIT_SESSION_PORT": true, + "SYS_AUDIT_SESSION_SELF": true, + "SYS_BDFLUSH": true, + "SYS_BIND": true, + "SYS_BINDAT": true, + "SYS_BREAK": true, + "SYS_BRK": true, + "SYS_BSDTHREAD_CREATE": true, + "SYS_BSDTHREAD_REGISTER": true, + "SYS_BSDTHREAD_TERMINATE": true, + "SYS_CAPGET": true, + "SYS_CAPSET": true, + "SYS_CAP_ENTER": true, + "SYS_CAP_FCNTLS_GET": true, + "SYS_CAP_FCNTLS_LIMIT": true, + "SYS_CAP_GETMODE": true, + "SYS_CAP_GETRIGHTS": true, + "SYS_CAP_IOCTLS_GET": true, + "SYS_CAP_IOCTLS_LIMIT": true, + "SYS_CAP_NEW": true, + "SYS_CAP_RIGHTS_GET": true, + "SYS_CAP_RIGHTS_LIMIT": true, + "SYS_CHDIR": true, + "SYS_CHFLAGS": true, + "SYS_CHFLAGSAT": true, + "SYS_CHMOD": true, + "SYS_CHMOD_EXTENDED": true, + "SYS_CHOWN": true, + "SYS_CHOWN32": true, + "SYS_CHROOT": true, + "SYS_CHUD": true, + "SYS_CLOCK_ADJTIME": true, + "SYS_CLOCK_GETCPUCLOCKID2": true, + "SYS_CLOCK_GETRES": true, + "SYS_CLOCK_GETTIME": true, + "SYS_CLOCK_NANOSLEEP": true, + "SYS_CLOCK_SETTIME": true, + "SYS_CLONE": true, + "SYS_CLOSE": true, + "SYS_CLOSEFROM": true, + "SYS_CLOSE_NOCANCEL": true, + "SYS_CONNECT": true, + "SYS_CONNECTAT": true, + "SYS_CONNECT_NOCANCEL": true, + "SYS_COPYFILE": true, + "SYS_CPUSET": true, + "SYS_CPUSET_GETAFFINITY": true, + "SYS_CPUSET_GETID": true, + "SYS_CPUSET_SETAFFINITY": true, + "SYS_CPUSET_SETID": true, + "SYS_CREAT": true, + "SYS_CREATE_MODULE": true, + "SYS_CSOPS": true, + "SYS_DELETE": true, + "SYS_DELETE_MODULE": true, + "SYS_DUP": true, + "SYS_DUP2": true, + "SYS_DUP3": true, + "SYS_EACCESS": true, + "SYS_EPOLL_CREATE": true, + "SYS_EPOLL_CREATE1": true, + "SYS_EPOLL_CTL": true, + "SYS_EPOLL_CTL_OLD": true, + "SYS_EPOLL_PWAIT": true, + "SYS_EPOLL_WAIT": true, + "SYS_EPOLL_WAIT_OLD": true, + "SYS_EVENTFD": true, + "SYS_EVENTFD2": true, + "SYS_EXCHANGEDATA": true, + "SYS_EXECVE": true, + "SYS_EXIT": true, + "SYS_EXIT_GROUP": true, + "SYS_EXTATTRCTL": true, + "SYS_EXTATTR_DELETE_FD": true, + "SYS_EXTATTR_DELETE_FILE": true, + "SYS_EXTATTR_DELETE_LINK": true, + "SYS_EXTATTR_GET_FD": true, + "SYS_EXTATTR_GET_FILE": true, + "SYS_EXTATTR_GET_LINK": true, + "SYS_EXTATTR_LIST_FD": true, + "SYS_EXTATTR_LIST_FILE": true, + "SYS_EXTATTR_LIST_LINK": true, + "SYS_EXTATTR_SET_FD": true, + "SYS_EXTATTR_SET_FILE": true, + "SYS_EXTATTR_SET_LINK": true, + "SYS_FACCESSAT": true, + "SYS_FADVISE64": true, + "SYS_FADVISE64_64": true, + "SYS_FALLOCATE": true, + "SYS_FANOTIFY_INIT": true, + "SYS_FANOTIFY_MARK": true, + "SYS_FCHDIR": true, + "SYS_FCHFLAGS": true, + "SYS_FCHMOD": true, + "SYS_FCHMODAT": true, + "SYS_FCHMOD_EXTENDED": true, + "SYS_FCHOWN": true, + "SYS_FCHOWN32": true, + "SYS_FCHOWNAT": true, + "SYS_FCHROOT": true, + "SYS_FCNTL": true, + "SYS_FCNTL64": true, + "SYS_FCNTL_NOCANCEL": true, + "SYS_FDATASYNC": true, + "SYS_FEXECVE": true, + "SYS_FFCLOCK_GETCOUNTER": true, + "SYS_FFCLOCK_GETESTIMATE": true, + "SYS_FFCLOCK_SETESTIMATE": true, + "SYS_FFSCTL": true, + "SYS_FGETATTRLIST": true, + "SYS_FGETXATTR": true, + "SYS_FHOPEN": true, + "SYS_FHSTAT": true, + "SYS_FHSTATFS": true, + "SYS_FILEPORT_MAKEFD": true, + "SYS_FILEPORT_MAKEPORT": true, + "SYS_FKTRACE": true, + 
"SYS_FLISTXATTR": true, + "SYS_FLOCK": true, + "SYS_FORK": true, + "SYS_FPATHCONF": true, + "SYS_FREEBSD6_FTRUNCATE": true, + "SYS_FREEBSD6_LSEEK": true, + "SYS_FREEBSD6_MMAP": true, + "SYS_FREEBSD6_PREAD": true, + "SYS_FREEBSD6_PWRITE": true, + "SYS_FREEBSD6_TRUNCATE": true, + "SYS_FREMOVEXATTR": true, + "SYS_FSCTL": true, + "SYS_FSETATTRLIST": true, + "SYS_FSETXATTR": true, + "SYS_FSGETPATH": true, + "SYS_FSTAT": true, + "SYS_FSTAT64": true, + "SYS_FSTAT64_EXTENDED": true, + "SYS_FSTATAT": true, + "SYS_FSTATAT64": true, + "SYS_FSTATFS": true, + "SYS_FSTATFS64": true, + "SYS_FSTATV": true, + "SYS_FSTATVFS1": true, + "SYS_FSTAT_EXTENDED": true, + "SYS_FSYNC": true, + "SYS_FSYNC_NOCANCEL": true, + "SYS_FSYNC_RANGE": true, + "SYS_FTIME": true, + "SYS_FTRUNCATE": true, + "SYS_FTRUNCATE64": true, + "SYS_FUTEX": true, + "SYS_FUTIMENS": true, + "SYS_FUTIMES": true, + "SYS_FUTIMESAT": true, + "SYS_GETATTRLIST": true, + "SYS_GETAUDIT": true, + "SYS_GETAUDIT_ADDR": true, + "SYS_GETAUID": true, + "SYS_GETCONTEXT": true, + "SYS_GETCPU": true, + "SYS_GETCWD": true, + "SYS_GETDENTS": true, + "SYS_GETDENTS64": true, + "SYS_GETDIRENTRIES": true, + "SYS_GETDIRENTRIES64": true, + "SYS_GETDIRENTRIESATTR": true, + "SYS_GETDTABLECOUNT": true, + "SYS_GETDTABLESIZE": true, + "SYS_GETEGID": true, + "SYS_GETEGID32": true, + "SYS_GETEUID": true, + "SYS_GETEUID32": true, + "SYS_GETFH": true, + "SYS_GETFSSTAT": true, + "SYS_GETFSSTAT64": true, + "SYS_GETGID": true, + "SYS_GETGID32": true, + "SYS_GETGROUPS": true, + "SYS_GETGROUPS32": true, + "SYS_GETHOSTUUID": true, + "SYS_GETITIMER": true, + "SYS_GETLCID": true, + "SYS_GETLOGIN": true, + "SYS_GETLOGINCLASS": true, + "SYS_GETPEERNAME": true, + "SYS_GETPGID": true, + "SYS_GETPGRP": true, + "SYS_GETPID": true, + "SYS_GETPMSG": true, + "SYS_GETPPID": true, + "SYS_GETPRIORITY": true, + "SYS_GETRESGID": true, + "SYS_GETRESGID32": true, + "SYS_GETRESUID": true, + "SYS_GETRESUID32": true, + "SYS_GETRLIMIT": true, + "SYS_GETRTABLE": true, + "SYS_GETRUSAGE": true, + "SYS_GETSGROUPS": true, + "SYS_GETSID": true, + "SYS_GETSOCKNAME": true, + "SYS_GETSOCKOPT": true, + "SYS_GETTHRID": true, + "SYS_GETTID": true, + "SYS_GETTIMEOFDAY": true, + "SYS_GETUID": true, + "SYS_GETUID32": true, + "SYS_GETVFSSTAT": true, + "SYS_GETWGROUPS": true, + "SYS_GETXATTR": true, + "SYS_GET_KERNEL_SYMS": true, + "SYS_GET_MEMPOLICY": true, + "SYS_GET_ROBUST_LIST": true, + "SYS_GET_THREAD_AREA": true, + "SYS_GTTY": true, + "SYS_IDENTITYSVC": true, + "SYS_IDLE": true, + "SYS_INITGROUPS": true, + "SYS_INIT_MODULE": true, + "SYS_INOTIFY_ADD_WATCH": true, + "SYS_INOTIFY_INIT": true, + "SYS_INOTIFY_INIT1": true, + "SYS_INOTIFY_RM_WATCH": true, + "SYS_IOCTL": true, + "SYS_IOPERM": true, + "SYS_IOPL": true, + "SYS_IOPOLICYSYS": true, + "SYS_IOPRIO_GET": true, + "SYS_IOPRIO_SET": true, + "SYS_IO_CANCEL": true, + "SYS_IO_DESTROY": true, + "SYS_IO_GETEVENTS": true, + "SYS_IO_SETUP": true, + "SYS_IO_SUBMIT": true, + "SYS_IPC": true, + "SYS_ISSETUGID": true, + "SYS_JAIL": true, + "SYS_JAIL_ATTACH": true, + "SYS_JAIL_GET": true, + "SYS_JAIL_REMOVE": true, + "SYS_JAIL_SET": true, + "SYS_KDEBUG_TRACE": true, + "SYS_KENV": true, + "SYS_KEVENT": true, + "SYS_KEVENT64": true, + "SYS_KEXEC_LOAD": true, + "SYS_KEYCTL": true, + "SYS_KILL": true, + "SYS_KLDFIND": true, + "SYS_KLDFIRSTMOD": true, + "SYS_KLDLOAD": true, + "SYS_KLDNEXT": true, + "SYS_KLDSTAT": true, + "SYS_KLDSYM": true, + "SYS_KLDUNLOAD": true, + "SYS_KLDUNLOADF": true, + "SYS_KQUEUE": true, + "SYS_KQUEUE1": true, + "SYS_KTIMER_CREATE": true, + 
"SYS_KTIMER_DELETE": true, + "SYS_KTIMER_GETOVERRUN": true, + "SYS_KTIMER_GETTIME": true, + "SYS_KTIMER_SETTIME": true, + "SYS_KTRACE": true, + "SYS_LCHFLAGS": true, + "SYS_LCHMOD": true, + "SYS_LCHOWN": true, + "SYS_LCHOWN32": true, + "SYS_LGETFH": true, + "SYS_LGETXATTR": true, + "SYS_LINK": true, + "SYS_LINKAT": true, + "SYS_LIO_LISTIO": true, + "SYS_LISTEN": true, + "SYS_LISTXATTR": true, + "SYS_LLISTXATTR": true, + "SYS_LOCK": true, + "SYS_LOOKUP_DCOOKIE": true, + "SYS_LPATHCONF": true, + "SYS_LREMOVEXATTR": true, + "SYS_LSEEK": true, + "SYS_LSETXATTR": true, + "SYS_LSTAT": true, + "SYS_LSTAT64": true, + "SYS_LSTAT64_EXTENDED": true, + "SYS_LSTATV": true, + "SYS_LSTAT_EXTENDED": true, + "SYS_LUTIMES": true, + "SYS_MAC_SYSCALL": true, + "SYS_MADVISE": true, + "SYS_MADVISE1": true, + "SYS_MAXSYSCALL": true, + "SYS_MBIND": true, + "SYS_MIGRATE_PAGES": true, + "SYS_MINCORE": true, + "SYS_MINHERIT": true, + "SYS_MKCOMPLEX": true, + "SYS_MKDIR": true, + "SYS_MKDIRAT": true, + "SYS_MKDIR_EXTENDED": true, + "SYS_MKFIFO": true, + "SYS_MKFIFOAT": true, + "SYS_MKFIFO_EXTENDED": true, + "SYS_MKNOD": true, + "SYS_MKNODAT": true, + "SYS_MLOCK": true, + "SYS_MLOCKALL": true, + "SYS_MMAP": true, + "SYS_MMAP2": true, + "SYS_MODCTL": true, + "SYS_MODFIND": true, + "SYS_MODFNEXT": true, + "SYS_MODIFY_LDT": true, + "SYS_MODNEXT": true, + "SYS_MODSTAT": true, + "SYS_MODWATCH": true, + "SYS_MOUNT": true, + "SYS_MOVE_PAGES": true, + "SYS_MPROTECT": true, + "SYS_MPX": true, + "SYS_MQUERY": true, + "SYS_MQ_GETSETATTR": true, + "SYS_MQ_NOTIFY": true, + "SYS_MQ_OPEN": true, + "SYS_MQ_TIMEDRECEIVE": true, + "SYS_MQ_TIMEDSEND": true, + "SYS_MQ_UNLINK": true, + "SYS_MREMAP": true, + "SYS_MSGCTL": true, + "SYS_MSGGET": true, + "SYS_MSGRCV": true, + "SYS_MSGRCV_NOCANCEL": true, + "SYS_MSGSND": true, + "SYS_MSGSND_NOCANCEL": true, + "SYS_MSGSYS": true, + "SYS_MSYNC": true, + "SYS_MSYNC_NOCANCEL": true, + "SYS_MUNLOCK": true, + "SYS_MUNLOCKALL": true, + "SYS_MUNMAP": true, + "SYS_NAME_TO_HANDLE_AT": true, + "SYS_NANOSLEEP": true, + "SYS_NEWFSTATAT": true, + "SYS_NFSCLNT": true, + "SYS_NFSSERVCTL": true, + "SYS_NFSSVC": true, + "SYS_NFSTAT": true, + "SYS_NICE": true, + "SYS_NLSTAT": true, + "SYS_NMOUNT": true, + "SYS_NSTAT": true, + "SYS_NTP_ADJTIME": true, + "SYS_NTP_GETTIME": true, + "SYS_OABI_SYSCALL_BASE": true, + "SYS_OBREAK": true, + "SYS_OLDFSTAT": true, + "SYS_OLDLSTAT": true, + "SYS_OLDOLDUNAME": true, + "SYS_OLDSTAT": true, + "SYS_OLDUNAME": true, + "SYS_OPEN": true, + "SYS_OPENAT": true, + "SYS_OPENBSD_POLL": true, + "SYS_OPEN_BY_HANDLE_AT": true, + "SYS_OPEN_EXTENDED": true, + "SYS_OPEN_NOCANCEL": true, + "SYS_OVADVISE": true, + "SYS_PACCEPT": true, + "SYS_PATHCONF": true, + "SYS_PAUSE": true, + "SYS_PCICONFIG_IOBASE": true, + "SYS_PCICONFIG_READ": true, + "SYS_PCICONFIG_WRITE": true, + "SYS_PDFORK": true, + "SYS_PDGETPID": true, + "SYS_PDKILL": true, + "SYS_PERF_EVENT_OPEN": true, + "SYS_PERSONALITY": true, + "SYS_PID_HIBERNATE": true, + "SYS_PID_RESUME": true, + "SYS_PID_SHUTDOWN_SOCKETS": true, + "SYS_PID_SUSPEND": true, + "SYS_PIPE": true, + "SYS_PIPE2": true, + "SYS_PIVOT_ROOT": true, + "SYS_PMC_CONTROL": true, + "SYS_PMC_GET_INFO": true, + "SYS_POLL": true, + "SYS_POLLTS": true, + "SYS_POLL_NOCANCEL": true, + "SYS_POSIX_FADVISE": true, + "SYS_POSIX_FALLOCATE": true, + "SYS_POSIX_OPENPT": true, + "SYS_POSIX_SPAWN": true, + "SYS_PPOLL": true, + "SYS_PRCTL": true, + "SYS_PREAD": true, + "SYS_PREAD64": true, + "SYS_PREADV": true, + "SYS_PREAD_NOCANCEL": true, + "SYS_PRLIMIT64": true, + "SYS_PROCCTL": 
true, + "SYS_PROCESS_POLICY": true, + "SYS_PROCESS_VM_READV": true, + "SYS_PROCESS_VM_WRITEV": true, + "SYS_PROC_INFO": true, + "SYS_PROF": true, + "SYS_PROFIL": true, + "SYS_PSELECT": true, + "SYS_PSELECT6": true, + "SYS_PSET_ASSIGN": true, + "SYS_PSET_CREATE": true, + "SYS_PSET_DESTROY": true, + "SYS_PSYNCH_CVBROAD": true, + "SYS_PSYNCH_CVCLRPREPOST": true, + "SYS_PSYNCH_CVSIGNAL": true, + "SYS_PSYNCH_CVWAIT": true, + "SYS_PSYNCH_MUTEXDROP": true, + "SYS_PSYNCH_MUTEXWAIT": true, + "SYS_PSYNCH_RW_DOWNGRADE": true, + "SYS_PSYNCH_RW_LONGRDLOCK": true, + "SYS_PSYNCH_RW_RDLOCK": true, + "SYS_PSYNCH_RW_UNLOCK": true, + "SYS_PSYNCH_RW_UNLOCK2": true, + "SYS_PSYNCH_RW_UPGRADE": true, + "SYS_PSYNCH_RW_WRLOCK": true, + "SYS_PSYNCH_RW_YIELDWRLOCK": true, + "SYS_PTRACE": true, + "SYS_PUTPMSG": true, + "SYS_PWRITE": true, + "SYS_PWRITE64": true, + "SYS_PWRITEV": true, + "SYS_PWRITE_NOCANCEL": true, + "SYS_QUERY_MODULE": true, + "SYS_QUOTACTL": true, + "SYS_RASCTL": true, + "SYS_RCTL_ADD_RULE": true, + "SYS_RCTL_GET_LIMITS": true, + "SYS_RCTL_GET_RACCT": true, + "SYS_RCTL_GET_RULES": true, + "SYS_RCTL_REMOVE_RULE": true, + "SYS_READ": true, + "SYS_READAHEAD": true, + "SYS_READDIR": true, + "SYS_READLINK": true, + "SYS_READLINKAT": true, + "SYS_READV": true, + "SYS_READV_NOCANCEL": true, + "SYS_READ_NOCANCEL": true, + "SYS_REBOOT": true, + "SYS_RECV": true, + "SYS_RECVFROM": true, + "SYS_RECVFROM_NOCANCEL": true, + "SYS_RECVMMSG": true, + "SYS_RECVMSG": true, + "SYS_RECVMSG_NOCANCEL": true, + "SYS_REMAP_FILE_PAGES": true, + "SYS_REMOVEXATTR": true, + "SYS_RENAME": true, + "SYS_RENAMEAT": true, + "SYS_REQUEST_KEY": true, + "SYS_RESTART_SYSCALL": true, + "SYS_REVOKE": true, + "SYS_RFORK": true, + "SYS_RMDIR": true, + "SYS_RTPRIO": true, + "SYS_RTPRIO_THREAD": true, + "SYS_RT_SIGACTION": true, + "SYS_RT_SIGPENDING": true, + "SYS_RT_SIGPROCMASK": true, + "SYS_RT_SIGQUEUEINFO": true, + "SYS_RT_SIGRETURN": true, + "SYS_RT_SIGSUSPEND": true, + "SYS_RT_SIGTIMEDWAIT": true, + "SYS_RT_TGSIGQUEUEINFO": true, + "SYS_SBRK": true, + "SYS_SCHED_GETAFFINITY": true, + "SYS_SCHED_GETPARAM": true, + "SYS_SCHED_GETSCHEDULER": true, + "SYS_SCHED_GET_PRIORITY_MAX": true, + "SYS_SCHED_GET_PRIORITY_MIN": true, + "SYS_SCHED_RR_GET_INTERVAL": true, + "SYS_SCHED_SETAFFINITY": true, + "SYS_SCHED_SETPARAM": true, + "SYS_SCHED_SETSCHEDULER": true, + "SYS_SCHED_YIELD": true, + "SYS_SCTP_GENERIC_RECVMSG": true, + "SYS_SCTP_GENERIC_SENDMSG": true, + "SYS_SCTP_GENERIC_SENDMSG_IOV": true, + "SYS_SCTP_PEELOFF": true, + "SYS_SEARCHFS": true, + "SYS_SECURITY": true, + "SYS_SELECT": true, + "SYS_SELECT_NOCANCEL": true, + "SYS_SEMCONFIG": true, + "SYS_SEMCTL": true, + "SYS_SEMGET": true, + "SYS_SEMOP": true, + "SYS_SEMSYS": true, + "SYS_SEMTIMEDOP": true, + "SYS_SEM_CLOSE": true, + "SYS_SEM_DESTROY": true, + "SYS_SEM_GETVALUE": true, + "SYS_SEM_INIT": true, + "SYS_SEM_OPEN": true, + "SYS_SEM_POST": true, + "SYS_SEM_TRYWAIT": true, + "SYS_SEM_UNLINK": true, + "SYS_SEM_WAIT": true, + "SYS_SEM_WAIT_NOCANCEL": true, + "SYS_SEND": true, + "SYS_SENDFILE": true, + "SYS_SENDFILE64": true, + "SYS_SENDMMSG": true, + "SYS_SENDMSG": true, + "SYS_SENDMSG_NOCANCEL": true, + "SYS_SENDTO": true, + "SYS_SENDTO_NOCANCEL": true, + "SYS_SETATTRLIST": true, + "SYS_SETAUDIT": true, + "SYS_SETAUDIT_ADDR": true, + "SYS_SETAUID": true, + "SYS_SETCONTEXT": true, + "SYS_SETDOMAINNAME": true, + "SYS_SETEGID": true, + "SYS_SETEUID": true, + "SYS_SETFIB": true, + "SYS_SETFSGID": true, + "SYS_SETFSGID32": true, + "SYS_SETFSUID": true, + "SYS_SETFSUID32": true, + 
"SYS_SETGID": true, + "SYS_SETGID32": true, + "SYS_SETGROUPS": true, + "SYS_SETGROUPS32": true, + "SYS_SETHOSTNAME": true, + "SYS_SETITIMER": true, + "SYS_SETLCID": true, + "SYS_SETLOGIN": true, + "SYS_SETLOGINCLASS": true, + "SYS_SETNS": true, + "SYS_SETPGID": true, + "SYS_SETPRIORITY": true, + "SYS_SETPRIVEXEC": true, + "SYS_SETREGID": true, + "SYS_SETREGID32": true, + "SYS_SETRESGID": true, + "SYS_SETRESGID32": true, + "SYS_SETRESUID": true, + "SYS_SETRESUID32": true, + "SYS_SETREUID": true, + "SYS_SETREUID32": true, + "SYS_SETRLIMIT": true, + "SYS_SETRTABLE": true, + "SYS_SETSGROUPS": true, + "SYS_SETSID": true, + "SYS_SETSOCKOPT": true, + "SYS_SETTID": true, + "SYS_SETTID_WITH_PID": true, + "SYS_SETTIMEOFDAY": true, + "SYS_SETUID": true, + "SYS_SETUID32": true, + "SYS_SETWGROUPS": true, + "SYS_SETXATTR": true, + "SYS_SET_MEMPOLICY": true, + "SYS_SET_ROBUST_LIST": true, + "SYS_SET_THREAD_AREA": true, + "SYS_SET_TID_ADDRESS": true, + "SYS_SGETMASK": true, + "SYS_SHARED_REGION_CHECK_NP": true, + "SYS_SHARED_REGION_MAP_AND_SLIDE_NP": true, + "SYS_SHMAT": true, + "SYS_SHMCTL": true, + "SYS_SHMDT": true, + "SYS_SHMGET": true, + "SYS_SHMSYS": true, + "SYS_SHM_OPEN": true, + "SYS_SHM_UNLINK": true, + "SYS_SHUTDOWN": true, + "SYS_SIGACTION": true, + "SYS_SIGALTSTACK": true, + "SYS_SIGNAL": true, + "SYS_SIGNALFD": true, + "SYS_SIGNALFD4": true, + "SYS_SIGPENDING": true, + "SYS_SIGPROCMASK": true, + "SYS_SIGQUEUE": true, + "SYS_SIGQUEUEINFO": true, + "SYS_SIGRETURN": true, + "SYS_SIGSUSPEND": true, + "SYS_SIGSUSPEND_NOCANCEL": true, + "SYS_SIGTIMEDWAIT": true, + "SYS_SIGWAIT": true, + "SYS_SIGWAITINFO": true, + "SYS_SOCKET": true, + "SYS_SOCKETCALL": true, + "SYS_SOCKETPAIR": true, + "SYS_SPLICE": true, + "SYS_SSETMASK": true, + "SYS_SSTK": true, + "SYS_STACK_SNAPSHOT": true, + "SYS_STAT": true, + "SYS_STAT64": true, + "SYS_STAT64_EXTENDED": true, + "SYS_STATFS": true, + "SYS_STATFS64": true, + "SYS_STATV": true, + "SYS_STATVFS1": true, + "SYS_STAT_EXTENDED": true, + "SYS_STIME": true, + "SYS_STTY": true, + "SYS_SWAPCONTEXT": true, + "SYS_SWAPCTL": true, + "SYS_SWAPOFF": true, + "SYS_SWAPON": true, + "SYS_SYMLINK": true, + "SYS_SYMLINKAT": true, + "SYS_SYNC": true, + "SYS_SYNCFS": true, + "SYS_SYNC_FILE_RANGE": true, + "SYS_SYSARCH": true, + "SYS_SYSCALL": true, + "SYS_SYSCALL_BASE": true, + "SYS_SYSFS": true, + "SYS_SYSINFO": true, + "SYS_SYSLOG": true, + "SYS_TEE": true, + "SYS_TGKILL": true, + "SYS_THREAD_SELFID": true, + "SYS_THR_CREATE": true, + "SYS_THR_EXIT": true, + "SYS_THR_KILL": true, + "SYS_THR_KILL2": true, + "SYS_THR_NEW": true, + "SYS_THR_SELF": true, + "SYS_THR_SET_NAME": true, + "SYS_THR_SUSPEND": true, + "SYS_THR_WAKE": true, + "SYS_TIME": true, + "SYS_TIMERFD_CREATE": true, + "SYS_TIMERFD_GETTIME": true, + "SYS_TIMERFD_SETTIME": true, + "SYS_TIMER_CREATE": true, + "SYS_TIMER_DELETE": true, + "SYS_TIMER_GETOVERRUN": true, + "SYS_TIMER_GETTIME": true, + "SYS_TIMER_SETTIME": true, + "SYS_TIMES": true, + "SYS_TKILL": true, + "SYS_TRUNCATE": true, + "SYS_TRUNCATE64": true, + "SYS_TUXCALL": true, + "SYS_UGETRLIMIT": true, + "SYS_ULIMIT": true, + "SYS_UMASK": true, + "SYS_UMASK_EXTENDED": true, + "SYS_UMOUNT": true, + "SYS_UMOUNT2": true, + "SYS_UNAME": true, + "SYS_UNDELETE": true, + "SYS_UNLINK": true, + "SYS_UNLINKAT": true, + "SYS_UNMOUNT": true, + "SYS_UNSHARE": true, + "SYS_USELIB": true, + "SYS_USTAT": true, + "SYS_UTIME": true, + "SYS_UTIMENSAT": true, + "SYS_UTIMES": true, + "SYS_UTRACE": true, + "SYS_UUIDGEN": true, + "SYS_VADVISE": true, + "SYS_VFORK": true, + 
"SYS_VHANGUP": true, + "SYS_VM86": true, + "SYS_VM86OLD": true, + "SYS_VMSPLICE": true, + "SYS_VM_PRESSURE_MONITOR": true, + "SYS_VSERVER": true, + "SYS_WAIT4": true, + "SYS_WAIT4_NOCANCEL": true, + "SYS_WAIT6": true, + "SYS_WAITEVENT": true, + "SYS_WAITID": true, + "SYS_WAITID_NOCANCEL": true, + "SYS_WAITPID": true, + "SYS_WATCHEVENT": true, + "SYS_WORKQ_KERNRETURN": true, + "SYS_WORKQ_OPEN": true, + "SYS_WRITE": true, + "SYS_WRITEV": true, + "SYS_WRITEV_NOCANCEL": true, + "SYS_WRITE_NOCANCEL": true, + "SYS_YIELD": true, + "SYS__LLSEEK": true, + "SYS__LWP_CONTINUE": true, + "SYS__LWP_CREATE": true, + "SYS__LWP_CTL": true, + "SYS__LWP_DETACH": true, + "SYS__LWP_EXIT": true, + "SYS__LWP_GETNAME": true, + "SYS__LWP_GETPRIVATE": true, + "SYS__LWP_KILL": true, + "SYS__LWP_PARK": true, + "SYS__LWP_SELF": true, + "SYS__LWP_SETNAME": true, + "SYS__LWP_SETPRIVATE": true, + "SYS__LWP_SUSPEND": true, + "SYS__LWP_UNPARK": true, + "SYS__LWP_UNPARK_ALL": true, + "SYS__LWP_WAIT": true, + "SYS__LWP_WAKEUP": true, + "SYS__NEWSELECT": true, + "SYS__PSET_BIND": true, + "SYS__SCHED_GETAFFINITY": true, + "SYS__SCHED_GETPARAM": true, + "SYS__SCHED_SETAFFINITY": true, + "SYS__SCHED_SETPARAM": true, + "SYS__SYSCTL": true, + "SYS__UMTX_LOCK": true, + "SYS__UMTX_OP": true, + "SYS__UMTX_UNLOCK": true, + "SYS___ACL_ACLCHECK_FD": true, + "SYS___ACL_ACLCHECK_FILE": true, + "SYS___ACL_ACLCHECK_LINK": true, + "SYS___ACL_DELETE_FD": true, + "SYS___ACL_DELETE_FILE": true, + "SYS___ACL_DELETE_LINK": true, + "SYS___ACL_GET_FD": true, + "SYS___ACL_GET_FILE": true, + "SYS___ACL_GET_LINK": true, + "SYS___ACL_SET_FD": true, + "SYS___ACL_SET_FILE": true, + "SYS___ACL_SET_LINK": true, + "SYS___CLONE": true, + "SYS___DISABLE_THREADSIGNAL": true, + "SYS___GETCWD": true, + "SYS___GETLOGIN": true, + "SYS___GET_TCB": true, + "SYS___MAC_EXECVE": true, + "SYS___MAC_GETFSSTAT": true, + "SYS___MAC_GET_FD": true, + "SYS___MAC_GET_FILE": true, + "SYS___MAC_GET_LCID": true, + "SYS___MAC_GET_LCTX": true, + "SYS___MAC_GET_LINK": true, + "SYS___MAC_GET_MOUNT": true, + "SYS___MAC_GET_PID": true, + "SYS___MAC_GET_PROC": true, + "SYS___MAC_MOUNT": true, + "SYS___MAC_SET_FD": true, + "SYS___MAC_SET_FILE": true, + "SYS___MAC_SET_LCTX": true, + "SYS___MAC_SET_LINK": true, + "SYS___MAC_SET_PROC": true, + "SYS___MAC_SYSCALL": true, + "SYS___OLD_SEMWAIT_SIGNAL": true, + "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL": true, + "SYS___POSIX_CHOWN": true, + "SYS___POSIX_FCHOWN": true, + "SYS___POSIX_LCHOWN": true, + "SYS___POSIX_RENAME": true, + "SYS___PTHREAD_CANCELED": true, + "SYS___PTHREAD_CHDIR": true, + "SYS___PTHREAD_FCHDIR": true, + "SYS___PTHREAD_KILL": true, + "SYS___PTHREAD_MARKCANCEL": true, + "SYS___PTHREAD_SIGMASK": true, + "SYS___QUOTACTL": true, + "SYS___SEMCTL": true, + "SYS___SEMWAIT_SIGNAL": true, + "SYS___SEMWAIT_SIGNAL_NOCANCEL": true, + "SYS___SETLOGIN": true, + "SYS___SETUGID": true, + "SYS___SET_TCB": true, + "SYS___SIGACTION_SIGTRAMP": true, + "SYS___SIGTIMEDWAIT": true, + "SYS___SIGWAIT": true, + "SYS___SIGWAIT_NOCANCEL": true, + "SYS___SYSCTL": true, + "SYS___TFORK": true, + "SYS___THREXIT": true, + "SYS___THRSIGDIVERT": true, + "SYS___THRSLEEP": true, + "SYS___THRWAKEUP": true, + "S_ARCH1": true, + "S_ARCH2": true, + "S_BLKSIZE": true, + "S_IEXEC": true, + "S_IFBLK": true, + "S_IFCHR": true, + "S_IFDIR": true, + "S_IFIFO": true, + "S_IFLNK": true, + "S_IFMT": true, + "S_IFREG": true, + "S_IFSOCK": true, + "S_IFWHT": true, + "S_IREAD": true, + "S_IRGRP": true, + "S_IROTH": true, + "S_IRUSR": true, + "S_IRWXG": true, + "S_IRWXO": true, + 
"S_IRWXU": true, + "S_ISGID": true, + "S_ISTXT": true, + "S_ISUID": true, + "S_ISVTX": true, + "S_IWGRP": true, + "S_IWOTH": true, + "S_IWRITE": true, + "S_IWUSR": true, + "S_IXGRP": true, + "S_IXOTH": true, + "S_IXUSR": true, + "S_LOGIN_SET": true, + "SecurityAttributes": true, + "Seek": true, + "Select": true, + "Sendfile": true, + "Sendmsg": true, + "SendmsgN": true, + "Sendto": true, + "Servent": true, + "SetBpf": true, + "SetBpfBuflen": true, + "SetBpfDatalink": true, + "SetBpfHeadercmpl": true, + "SetBpfImmediate": true, + "SetBpfInterface": true, + "SetBpfPromisc": true, + "SetBpfTimeout": true, + "SetCurrentDirectory": true, + "SetEndOfFile": true, + "SetEnvironmentVariable": true, + "SetFileAttributes": true, + "SetFileCompletionNotificationModes": true, + "SetFilePointer": true, + "SetFileTime": true, + "SetHandleInformation": true, + "SetKevent": true, + "SetLsfPromisc": true, + "SetNonblock": true, + "Setdomainname": true, + "Setegid": true, + "Setenv": true, + "Seteuid": true, + "Setfsgid": true, + "Setfsuid": true, + "Setgid": true, + "Setgroups": true, + "Sethostname": true, + "Setlogin": true, + "Setpgid": true, + "Setpriority": true, + "Setprivexec": true, + "Setregid": true, + "Setresgid": true, + "Setresuid": true, + "Setreuid": true, + "Setrlimit": true, + "Setsid": true, + "Setsockopt": true, + "SetsockoptByte": true, + "SetsockoptICMPv6Filter": true, + "SetsockoptIPMreq": true, + "SetsockoptIPMreqn": true, + "SetsockoptIPv6Mreq": true, + "SetsockoptInet4Addr": true, + "SetsockoptInt": true, + "SetsockoptLinger": true, + "SetsockoptString": true, + "SetsockoptTimeval": true, + "Settimeofday": true, + "Setuid": true, + "Setxattr": true, + "Shutdown": true, + "SidTypeAlias": true, + "SidTypeComputer": true, + "SidTypeDeletedAccount": true, + "SidTypeDomain": true, + "SidTypeGroup": true, + "SidTypeInvalid": true, + "SidTypeLabel": true, + "SidTypeUnknown": true, + "SidTypeUser": true, + "SidTypeWellKnownGroup": true, + "Signal": true, + "SizeofBpfHdr": true, + "SizeofBpfInsn": true, + "SizeofBpfProgram": true, + "SizeofBpfStat": true, + "SizeofBpfVersion": true, + "SizeofBpfZbuf": true, + "SizeofBpfZbufHeader": true, + "SizeofCmsghdr": true, + "SizeofICMPv6Filter": true, + "SizeofIPMreq": true, + "SizeofIPMreqn": true, + "SizeofIPv6MTUInfo": true, + "SizeofIPv6Mreq": true, + "SizeofIfAddrmsg": true, + "SizeofIfAnnounceMsghdr": true, + "SizeofIfData": true, + "SizeofIfInfomsg": true, + "SizeofIfMsghdr": true, + "SizeofIfaMsghdr": true, + "SizeofIfmaMsghdr": true, + "SizeofIfmaMsghdr2": true, + "SizeofInet4Pktinfo": true, + "SizeofInet6Pktinfo": true, + "SizeofInotifyEvent": true, + "SizeofLinger": true, + "SizeofMsghdr": true, + "SizeofNlAttr": true, + "SizeofNlMsgerr": true, + "SizeofNlMsghdr": true, + "SizeofRtAttr": true, + "SizeofRtGenmsg": true, + "SizeofRtMetrics": true, + "SizeofRtMsg": true, + "SizeofRtMsghdr": true, + "SizeofRtNexthop": true, + "SizeofSockFilter": true, + "SizeofSockFprog": true, + "SizeofSockaddrAny": true, + "SizeofSockaddrDatalink": true, + "SizeofSockaddrInet4": true, + "SizeofSockaddrInet6": true, + "SizeofSockaddrLinklayer": true, + "SizeofSockaddrNetlink": true, + "SizeofSockaddrUnix": true, + "SizeofTCPInfo": true, + "SizeofUcred": true, + "SlicePtrFromStrings": true, + "SockFilter": true, + "SockFprog": true, + "Sockaddr": true, + "SockaddrDatalink": true, + "SockaddrGen": true, + "SockaddrInet4": true, + "SockaddrInet6": true, + "SockaddrLinklayer": true, + "SockaddrNetlink": true, + "SockaddrUnix": true, + "Socket": true, + 
"SocketControlMessage": true, + "SocketDisableIPv6": true, + "Socketpair": true, + "Splice": true, + "StartProcess": true, + "StartupInfo": true, + "Stat": true, + "Stat_t": true, + "Statfs": true, + "Statfs_t": true, + "Stderr": true, + "Stdin": true, + "Stdout": true, + "StringBytePtr": true, + "StringByteSlice": true, + "StringSlicePtr": true, + "StringToSid": true, + "StringToUTF16": true, + "StringToUTF16Ptr": true, + "Symlink": true, + "Sync": true, + "SyncFileRange": true, + "SysProcAttr": true, + "SysProcIDMap": true, + "Syscall": true, + "Syscall12": true, + "Syscall15": true, + "Syscall18": true, + "Syscall6": true, + "Syscall9": true, + "Sysctl": true, + "SysctlUint32": true, + "Sysctlnode": true, + "Sysinfo": true, + "Sysinfo_t": true, + "Systemtime": true, + "TCGETS": true, + "TCIFLUSH": true, + "TCIOFLUSH": true, + "TCOFLUSH": true, + "TCPInfo": true, + "TCPKeepalive": true, + "TCP_CA_NAME_MAX": true, + "TCP_CONGCTL": true, + "TCP_CONGESTION": true, + "TCP_CONNECTIONTIMEOUT": true, + "TCP_CORK": true, + "TCP_DEFER_ACCEPT": true, + "TCP_INFO": true, + "TCP_KEEPALIVE": true, + "TCP_KEEPCNT": true, + "TCP_KEEPIDLE": true, + "TCP_KEEPINIT": true, + "TCP_KEEPINTVL": true, + "TCP_LINGER2": true, + "TCP_MAXBURST": true, + "TCP_MAXHLEN": true, + "TCP_MAXOLEN": true, + "TCP_MAXSEG": true, + "TCP_MAXWIN": true, + "TCP_MAX_SACK": true, + "TCP_MAX_WINSHIFT": true, + "TCP_MD5SIG": true, + "TCP_MD5SIG_MAXKEYLEN": true, + "TCP_MINMSS": true, + "TCP_MINMSSOVERLOAD": true, + "TCP_MSS": true, + "TCP_NODELAY": true, + "TCP_NOOPT": true, + "TCP_NOPUSH": true, + "TCP_NSTATES": true, + "TCP_QUICKACK": true, + "TCP_RXT_CONNDROPTIME": true, + "TCP_RXT_FINDROP": true, + "TCP_SACK_ENABLE": true, + "TCP_SYNCNT": true, + "TCP_VENDOR": true, + "TCP_WINDOW_CLAMP": true, + "TCSAFLUSH": true, + "TCSETS": true, + "TF_DISCONNECT": true, + "TF_REUSE_SOCKET": true, + "TF_USE_DEFAULT_WORKER": true, + "TF_USE_KERNEL_APC": true, + "TF_USE_SYSTEM_THREAD": true, + "TF_WRITE_BEHIND": true, + "TH32CS_INHERIT": true, + "TH32CS_SNAPALL": true, + "TH32CS_SNAPHEAPLIST": true, + "TH32CS_SNAPMODULE": true, + "TH32CS_SNAPMODULE32": true, + "TH32CS_SNAPPROCESS": true, + "TH32CS_SNAPTHREAD": true, + "TIME_ZONE_ID_DAYLIGHT": true, + "TIME_ZONE_ID_STANDARD": true, + "TIME_ZONE_ID_UNKNOWN": true, + "TIOCCBRK": true, + "TIOCCDTR": true, + "TIOCCONS": true, + "TIOCDCDTIMESTAMP": true, + "TIOCDRAIN": true, + "TIOCDSIMICROCODE": true, + "TIOCEXCL": true, + "TIOCEXT": true, + "TIOCFLAG_CDTRCTS": true, + "TIOCFLAG_CLOCAL": true, + "TIOCFLAG_CRTSCTS": true, + "TIOCFLAG_MDMBUF": true, + "TIOCFLAG_PPS": true, + "TIOCFLAG_SOFTCAR": true, + "TIOCFLUSH": true, + "TIOCGDEV": true, + "TIOCGDRAINWAIT": true, + "TIOCGETA": true, + "TIOCGETD": true, + "TIOCGFLAGS": true, + "TIOCGICOUNT": true, + "TIOCGLCKTRMIOS": true, + "TIOCGLINED": true, + "TIOCGPGRP": true, + "TIOCGPTN": true, + "TIOCGQSIZE": true, + "TIOCGRANTPT": true, + "TIOCGRS485": true, + "TIOCGSERIAL": true, + "TIOCGSID": true, + "TIOCGSIZE": true, + "TIOCGSOFTCAR": true, + "TIOCGTSTAMP": true, + "TIOCGWINSZ": true, + "TIOCINQ": true, + "TIOCIXOFF": true, + "TIOCIXON": true, + "TIOCLINUX": true, + "TIOCMBIC": true, + "TIOCMBIS": true, + "TIOCMGDTRWAIT": true, + "TIOCMGET": true, + "TIOCMIWAIT": true, + "TIOCMODG": true, + "TIOCMODS": true, + "TIOCMSDTRWAIT": true, + "TIOCMSET": true, + "TIOCM_CAR": true, + "TIOCM_CD": true, + "TIOCM_CTS": true, + "TIOCM_DCD": true, + "TIOCM_DSR": true, + "TIOCM_DTR": true, + "TIOCM_LE": true, + "TIOCM_RI": true, + "TIOCM_RNG": true, + "TIOCM_RTS": 
true, + "TIOCM_SR": true, + "TIOCM_ST": true, + "TIOCNOTTY": true, + "TIOCNXCL": true, + "TIOCOUTQ": true, + "TIOCPKT": true, + "TIOCPKT_DATA": true, + "TIOCPKT_DOSTOP": true, + "TIOCPKT_FLUSHREAD": true, + "TIOCPKT_FLUSHWRITE": true, + "TIOCPKT_IOCTL": true, + "TIOCPKT_NOSTOP": true, + "TIOCPKT_START": true, + "TIOCPKT_STOP": true, + "TIOCPTMASTER": true, + "TIOCPTMGET": true, + "TIOCPTSNAME": true, + "TIOCPTYGNAME": true, + "TIOCPTYGRANT": true, + "TIOCPTYUNLK": true, + "TIOCRCVFRAME": true, + "TIOCREMOTE": true, + "TIOCSBRK": true, + "TIOCSCONS": true, + "TIOCSCTTY": true, + "TIOCSDRAINWAIT": true, + "TIOCSDTR": true, + "TIOCSERCONFIG": true, + "TIOCSERGETLSR": true, + "TIOCSERGETMULTI": true, + "TIOCSERGSTRUCT": true, + "TIOCSERGWILD": true, + "TIOCSERSETMULTI": true, + "TIOCSERSWILD": true, + "TIOCSER_TEMT": true, + "TIOCSETA": true, + "TIOCSETAF": true, + "TIOCSETAW": true, + "TIOCSETD": true, + "TIOCSFLAGS": true, + "TIOCSIG": true, + "TIOCSLCKTRMIOS": true, + "TIOCSLINED": true, + "TIOCSPGRP": true, + "TIOCSPTLCK": true, + "TIOCSQSIZE": true, + "TIOCSRS485": true, + "TIOCSSERIAL": true, + "TIOCSSIZE": true, + "TIOCSSOFTCAR": true, + "TIOCSTART": true, + "TIOCSTAT": true, + "TIOCSTI": true, + "TIOCSTOP": true, + "TIOCSTSTAMP": true, + "TIOCSWINSZ": true, + "TIOCTIMESTAMP": true, + "TIOCUCNTL": true, + "TIOCVHANGUP": true, + "TIOCXMTFRAME": true, + "TOKEN_ADJUST_DEFAULT": true, + "TOKEN_ADJUST_GROUPS": true, + "TOKEN_ADJUST_PRIVILEGES": true, + "TOKEN_ADJUST_SESSIONID": true, + "TOKEN_ALL_ACCESS": true, + "TOKEN_ASSIGN_PRIMARY": true, + "TOKEN_DUPLICATE": true, + "TOKEN_EXECUTE": true, + "TOKEN_IMPERSONATE": true, + "TOKEN_QUERY": true, + "TOKEN_QUERY_SOURCE": true, + "TOKEN_READ": true, + "TOKEN_WRITE": true, + "TOSTOP": true, + "TRUNCATE_EXISTING": true, + "TUNATTACHFILTER": true, + "TUNDETACHFILTER": true, + "TUNGETFEATURES": true, + "TUNGETIFF": true, + "TUNGETSNDBUF": true, + "TUNGETVNETHDRSZ": true, + "TUNSETDEBUG": true, + "TUNSETGROUP": true, + "TUNSETIFF": true, + "TUNSETLINK": true, + "TUNSETNOCSUM": true, + "TUNSETOFFLOAD": true, + "TUNSETOWNER": true, + "TUNSETPERSIST": true, + "TUNSETSNDBUF": true, + "TUNSETTXFILTER": true, + "TUNSETVNETHDRSZ": true, + "Tee": true, + "TerminateProcess": true, + "Termios": true, + "Tgkill": true, + "Time": true, + "Time_t": true, + "Times": true, + "Timespec": true, + "TimespecToNsec": true, + "Timeval": true, + "Timeval32": true, + "TimevalToNsec": true, + "Timex": true, + "Timezoneinformation": true, + "Tms": true, + "Token": true, + "TokenAccessInformation": true, + "TokenAuditPolicy": true, + "TokenDefaultDacl": true, + "TokenElevation": true, + "TokenElevationType": true, + "TokenGroups": true, + "TokenGroupsAndPrivileges": true, + "TokenHasRestrictions": true, + "TokenImpersonationLevel": true, + "TokenIntegrityLevel": true, + "TokenLinkedToken": true, + "TokenLogonSid": true, + "TokenMandatoryPolicy": true, + "TokenOrigin": true, + "TokenOwner": true, + "TokenPrimaryGroup": true, + "TokenPrivileges": true, + "TokenRestrictedSids": true, + "TokenSandBoxInert": true, + "TokenSessionId": true, + "TokenSessionReference": true, + "TokenSource": true, + "TokenStatistics": true, + "TokenType": true, + "TokenUIAccess": true, + "TokenUser": true, + "TokenVirtualizationAllowed": true, + "TokenVirtualizationEnabled": true, + "Tokenprimarygroup": true, + "Tokenuser": true, + "TranslateAccountName": true, + "TranslateName": true, + "TransmitFile": true, + "TransmitFileBuffers": true, + "Truncate": true, + "UNIX_PATH_MAX": true, + 
"USAGE_MATCH_TYPE_AND": true, + "USAGE_MATCH_TYPE_OR": true, + "UTF16FromString": true, + "UTF16PtrFromString": true, + "UTF16ToString": true, + "Ucred": true, + "Umask": true, + "Uname": true, + "Undelete": true, + "UnixCredentials": true, + "UnixRights": true, + "Unlink": true, + "Unlinkat": true, + "UnmapViewOfFile": true, + "Unmount": true, + "Unsetenv": true, + "Unshare": true, + "UserInfo10": true, + "Ustat": true, + "Ustat_t": true, + "Utimbuf": true, + "Utime": true, + "Utimes": true, + "UtimesNano": true, + "Utsname": true, + "VDISCARD": true, + "VDSUSP": true, + "VEOF": true, + "VEOL": true, + "VEOL2": true, + "VERASE": true, + "VERASE2": true, + "VINTR": true, + "VKILL": true, + "VLNEXT": true, + "VMIN": true, + "VQUIT": true, + "VREPRINT": true, + "VSTART": true, + "VSTATUS": true, + "VSTOP": true, + "VSUSP": true, + "VSWTC": true, + "VT0": true, + "VT1": true, + "VTDLY": true, + "VTIME": true, + "VWERASE": true, + "VirtualLock": true, + "VirtualUnlock": true, + "WAIT_ABANDONED": true, + "WAIT_FAILED": true, + "WAIT_OBJECT_0": true, + "WAIT_TIMEOUT": true, + "WALL": true, + "WALLSIG": true, + "WALTSIG": true, + "WCLONE": true, + "WCONTINUED": true, + "WCOREFLAG": true, + "WEXITED": true, + "WLINUXCLONE": true, + "WNOHANG": true, + "WNOTHREAD": true, + "WNOWAIT": true, + "WNOZOMBIE": true, + "WOPTSCHECKED": true, + "WORDSIZE": true, + "WSABuf": true, + "WSACleanup": true, + "WSADESCRIPTION_LEN": true, + "WSAData": true, + "WSAEACCES": true, + "WSAECONNABORTED": true, + "WSAECONNRESET": true, + "WSAEnumProtocols": true, + "WSAID_CONNECTEX": true, + "WSAIoctl": true, + "WSAPROTOCOL_LEN": true, + "WSAProtocolChain": true, + "WSAProtocolInfo": true, + "WSARecv": true, + "WSARecvFrom": true, + "WSASYS_STATUS_LEN": true, + "WSASend": true, + "WSASendTo": true, + "WSASendto": true, + "WSAStartup": true, + "WSTOPPED": true, + "WTRAPPED": true, + "WUNTRACED": true, + "Wait4": true, + "WaitForSingleObject": true, + "WaitStatus": true, + "Win32FileAttributeData": true, + "Win32finddata": true, + "Write": true, + "WriteConsole": true, + "WriteFile": true, + "X509_ASN_ENCODING": true, + "XCASE": true, + "XP1_CONNECTIONLESS": true, + "XP1_CONNECT_DATA": true, + "XP1_DISCONNECT_DATA": true, + "XP1_EXPEDITED_DATA": true, + "XP1_GRACEFUL_CLOSE": true, + "XP1_GUARANTEED_DELIVERY": true, + "XP1_GUARANTEED_ORDER": true, + "XP1_IFS_HANDLES": true, + "XP1_MESSAGE_ORIENTED": true, + "XP1_MULTIPOINT_CONTROL_PLANE": true, + "XP1_MULTIPOINT_DATA_PLANE": true, + "XP1_PARTIAL_MESSAGE": true, + "XP1_PSEUDO_STREAM": true, + "XP1_QOS_SUPPORTED": true, + "XP1_SAN_SUPPORT_SDP": true, + "XP1_SUPPORT_BROADCAST": true, + "XP1_SUPPORT_MULTIPOINT": true, + "XP1_UNI_RECV": true, + "XP1_UNI_SEND": true, + }, + "syscall/js": map[string]bool{ + "CopyBytesToGo": true, + "CopyBytesToJS": true, + "Error": true, + "Func": true, + "FuncOf": true, + "Global": true, + "Null": true, + "Type": true, + "TypeBoolean": true, + "TypeFunction": true, + "TypeNull": true, + "TypeNumber": true, + "TypeObject": true, + "TypeString": true, + "TypeSymbol": true, + "TypeUndefined": true, + "Undefined": true, + "Value": true, + "ValueError": true, + "ValueOf": true, + "Wrapper": true, + }, + "testing": map[string]bool{ + "AllocsPerRun": true, + "B": true, + "Benchmark": true, + "BenchmarkResult": true, + "Cover": true, + "CoverBlock": true, + "CoverMode": true, + "Coverage": true, + "Init": true, + "InternalBenchmark": true, + "InternalExample": true, + "InternalTest": true, + "M": true, + "Main": true, + "MainStart": true, + "PB": true, + 
"RegisterCover": true, + "RunBenchmarks": true, + "RunExamples": true, + "RunTests": true, + "Short": true, + "T": true, + "TB": true, + "Verbose": true, + }, + "testing/iotest": map[string]bool{ + "DataErrReader": true, + "ErrTimeout": true, + "HalfReader": true, + "NewReadLogger": true, + "NewWriteLogger": true, + "OneByteReader": true, + "TimeoutReader": true, + "TruncateWriter": true, + }, + "testing/quick": map[string]bool{ + "Check": true, + "CheckEqual": true, + "CheckEqualError": true, + "CheckError": true, + "Config": true, + "Generator": true, + "SetupError": true, + "Value": true, + }, + "text/scanner": map[string]bool{ + "Char": true, + "Comment": true, + "EOF": true, + "Float": true, + "GoTokens": true, + "GoWhitespace": true, + "Ident": true, + "Int": true, + "Position": true, + "RawString": true, + "ScanChars": true, + "ScanComments": true, + "ScanFloats": true, + "ScanIdents": true, + "ScanInts": true, + "ScanRawStrings": true, + "ScanStrings": true, + "Scanner": true, + "SkipComments": true, + "String": true, + "TokenString": true, + }, + "text/tabwriter": map[string]bool{ + "AlignRight": true, + "Debug": true, + "DiscardEmptyColumns": true, + "Escape": true, + "FilterHTML": true, + "NewWriter": true, + "StripEscape": true, + "TabIndent": true, + "Writer": true, + }, + "text/template": map[string]bool{ + "ExecError": true, + "FuncMap": true, + "HTMLEscape": true, + "HTMLEscapeString": true, + "HTMLEscaper": true, + "IsTrue": true, + "JSEscape": true, + "JSEscapeString": true, + "JSEscaper": true, + "Must": true, + "New": true, + "ParseFiles": true, + "ParseGlob": true, + "Template": true, + "URLQueryEscaper": true, + }, + "text/template/parse": map[string]bool{ + "ActionNode": true, + "BoolNode": true, + "BranchNode": true, + "ChainNode": true, + "CommandNode": true, + "DotNode": true, + "FieldNode": true, + "IdentifierNode": true, + "IfNode": true, + "IsEmptyTree": true, + "ListNode": true, + "New": true, + "NewIdentifier": true, + "NilNode": true, + "Node": true, + "NodeAction": true, + "NodeBool": true, + "NodeChain": true, + "NodeCommand": true, + "NodeDot": true, + "NodeField": true, + "NodeIdentifier": true, + "NodeIf": true, + "NodeList": true, + "NodeNil": true, + "NodeNumber": true, + "NodePipe": true, + "NodeRange": true, + "NodeString": true, + "NodeTemplate": true, + "NodeText": true, + "NodeType": true, + "NodeVariable": true, + "NodeWith": true, + "NumberNode": true, + "Parse": true, + "PipeNode": true, + "Pos": true, + "RangeNode": true, + "StringNode": true, + "TemplateNode": true, + "TextNode": true, + "Tree": true, + "VariableNode": true, + "WithNode": true, + }, + "time": map[string]bool{ + "ANSIC": true, + "After": true, + "AfterFunc": true, + "April": true, + "August": true, + "Date": true, + "December": true, + "Duration": true, + "February": true, + "FixedZone": true, + "Friday": true, + "Hour": true, + "January": true, + "July": true, + "June": true, + "Kitchen": true, + "LoadLocation": true, + "LoadLocationFromTZData": true, + "Local": true, + "Location": true, + "March": true, + "May": true, + "Microsecond": true, + "Millisecond": true, + "Minute": true, + "Monday": true, + "Month": true, + "Nanosecond": true, + "NewTicker": true, + "NewTimer": true, + "November": true, + "Now": true, + "October": true, + "Parse": true, + "ParseDuration": true, + "ParseError": true, + "ParseInLocation": true, + "RFC1123": true, + "RFC1123Z": true, + "RFC3339": true, + "RFC3339Nano": true, + "RFC822": true, + "RFC822Z": true, + "RFC850": true, + "RubyDate": true, + 
"Saturday": true, + "Second": true, + "September": true, + "Since": true, + "Sleep": true, + "Stamp": true, + "StampMicro": true, + "StampMilli": true, + "StampNano": true, + "Sunday": true, + "Thursday": true, + "Tick": true, + "Ticker": true, + "Time": true, + "Timer": true, + "Tuesday": true, + "UTC": true, + "Unix": true, + "UnixDate": true, + "Until": true, + "Wednesday": true, + "Weekday": true, + }, + "unicode": map[string]bool{ + "ASCII_Hex_Digit": true, + "Adlam": true, + "Ahom": true, + "Anatolian_Hieroglyphs": true, + "Arabic": true, + "Armenian": true, + "Avestan": true, + "AzeriCase": true, + "Balinese": true, + "Bamum": true, + "Bassa_Vah": true, + "Batak": true, + "Bengali": true, + "Bhaiksuki": true, + "Bidi_Control": true, + "Bopomofo": true, + "Brahmi": true, + "Braille": true, + "Buginese": true, + "Buhid": true, + "C": true, + "Canadian_Aboriginal": true, + "Carian": true, + "CaseRange": true, + "CaseRanges": true, + "Categories": true, + "Caucasian_Albanian": true, + "Cc": true, + "Cf": true, + "Chakma": true, + "Cham": true, + "Cherokee": true, + "Co": true, + "Common": true, + "Coptic": true, + "Cs": true, + "Cuneiform": true, + "Cypriot": true, + "Cyrillic": true, + "Dash": true, + "Deprecated": true, + "Deseret": true, + "Devanagari": true, + "Diacritic": true, + "Digit": true, + "Dogra": true, + "Duployan": true, + "Egyptian_Hieroglyphs": true, + "Elbasan": true, + "Ethiopic": true, + "Extender": true, + "FoldCategory": true, + "FoldScript": true, + "Georgian": true, + "Glagolitic": true, + "Gothic": true, + "Grantha": true, + "GraphicRanges": true, + "Greek": true, + "Gujarati": true, + "Gunjala_Gondi": true, + "Gurmukhi": true, + "Han": true, + "Hangul": true, + "Hanifi_Rohingya": true, + "Hanunoo": true, + "Hatran": true, + "Hebrew": true, + "Hex_Digit": true, + "Hiragana": true, + "Hyphen": true, + "IDS_Binary_Operator": true, + "IDS_Trinary_Operator": true, + "Ideographic": true, + "Imperial_Aramaic": true, + "In": true, + "Inherited": true, + "Inscriptional_Pahlavi": true, + "Inscriptional_Parthian": true, + "Is": true, + "IsControl": true, + "IsDigit": true, + "IsGraphic": true, + "IsLetter": true, + "IsLower": true, + "IsMark": true, + "IsNumber": true, + "IsOneOf": true, + "IsPrint": true, + "IsPunct": true, + "IsSpace": true, + "IsSymbol": true, + "IsTitle": true, + "IsUpper": true, + "Javanese": true, + "Join_Control": true, + "Kaithi": true, + "Kannada": true, + "Katakana": true, + "Kayah_Li": true, + "Kharoshthi": true, + "Khmer": true, + "Khojki": true, + "Khudawadi": true, + "L": true, + "Lao": true, + "Latin": true, + "Lepcha": true, + "Letter": true, + "Limbu": true, + "Linear_A": true, + "Linear_B": true, + "Lisu": true, + "Ll": true, + "Lm": true, + "Lo": true, + "Logical_Order_Exception": true, + "Lower": true, + "LowerCase": true, + "Lt": true, + "Lu": true, + "Lycian": true, + "Lydian": true, + "M": true, + "Mahajani": true, + "Makasar": true, + "Malayalam": true, + "Mandaic": true, + "Manichaean": true, + "Marchen": true, + "Mark": true, + "Masaram_Gondi": true, + "MaxASCII": true, + "MaxCase": true, + "MaxLatin1": true, + "MaxRune": true, + "Mc": true, + "Me": true, + "Medefaidrin": true, + "Meetei_Mayek": true, + "Mende_Kikakui": true, + "Meroitic_Cursive": true, + "Meroitic_Hieroglyphs": true, + "Miao": true, + "Mn": true, + "Modi": true, + "Mongolian": true, + "Mro": true, + "Multani": true, + "Myanmar": true, + "N": true, + "Nabataean": true, + "Nd": true, + "New_Tai_Lue": true, + "Newa": true, + "Nko": true, + "Nl": true, + "No": true, 
+ "Noncharacter_Code_Point": true, + "Number": true, + "Nushu": true, + "Ogham": true, + "Ol_Chiki": true, + "Old_Hungarian": true, + "Old_Italic": true, + "Old_North_Arabian": true, + "Old_Permic": true, + "Old_Persian": true, + "Old_Sogdian": true, + "Old_South_Arabian": true, + "Old_Turkic": true, + "Oriya": true, + "Osage": true, + "Osmanya": true, + "Other": true, + "Other_Alphabetic": true, + "Other_Default_Ignorable_Code_Point": true, + "Other_Grapheme_Extend": true, + "Other_ID_Continue": true, + "Other_ID_Start": true, + "Other_Lowercase": true, + "Other_Math": true, + "Other_Uppercase": true, + "P": true, + "Pahawh_Hmong": true, + "Palmyrene": true, + "Pattern_Syntax": true, + "Pattern_White_Space": true, + "Pau_Cin_Hau": true, + "Pc": true, + "Pd": true, + "Pe": true, + "Pf": true, + "Phags_Pa": true, + "Phoenician": true, + "Pi": true, + "Po": true, + "Prepended_Concatenation_Mark": true, + "PrintRanges": true, + "Properties": true, + "Ps": true, + "Psalter_Pahlavi": true, + "Punct": true, + "Quotation_Mark": true, + "Radical": true, + "Range16": true, + "Range32": true, + "RangeTable": true, + "Regional_Indicator": true, + "Rejang": true, + "ReplacementChar": true, + "Runic": true, + "S": true, + "STerm": true, + "Samaritan": true, + "Saurashtra": true, + "Sc": true, + "Scripts": true, + "Sentence_Terminal": true, + "Sharada": true, + "Shavian": true, + "Siddham": true, + "SignWriting": true, + "SimpleFold": true, + "Sinhala": true, + "Sk": true, + "Sm": true, + "So": true, + "Soft_Dotted": true, + "Sogdian": true, + "Sora_Sompeng": true, + "Soyombo": true, + "Space": true, + "SpecialCase": true, + "Sundanese": true, + "Syloti_Nagri": true, + "Symbol": true, + "Syriac": true, + "Tagalog": true, + "Tagbanwa": true, + "Tai_Le": true, + "Tai_Tham": true, + "Tai_Viet": true, + "Takri": true, + "Tamil": true, + "Tangut": true, + "Telugu": true, + "Terminal_Punctuation": true, + "Thaana": true, + "Thai": true, + "Tibetan": true, + "Tifinagh": true, + "Tirhuta": true, + "Title": true, + "TitleCase": true, + "To": true, + "ToLower": true, + "ToTitle": true, + "ToUpper": true, + "TurkishCase": true, + "Ugaritic": true, + "Unified_Ideograph": true, + "Upper": true, + "UpperCase": true, + "UpperLower": true, + "Vai": true, + "Variation_Selector": true, + "Version": true, + "Warang_Citi": true, + "White_Space": true, + "Yi": true, + "Z": true, + "Zanabazar_Square": true, + "Zl": true, + "Zp": true, + "Zs": true, + }, + "unicode/utf16": map[string]bool{ + "Decode": true, + "DecodeRune": true, + "Encode": true, + "EncodeRune": true, + "IsSurrogate": true, + }, + "unicode/utf8": map[string]bool{ + "DecodeLastRune": true, + "DecodeLastRuneInString": true, + "DecodeRune": true, + "DecodeRuneInString": true, + "EncodeRune": true, + "FullRune": true, + "FullRuneInString": true, + "MaxRune": true, + "RuneCount": true, + "RuneCountInString": true, + "RuneError": true, + "RuneLen": true, + "RuneSelf": true, + "RuneStart": true, + "UTFMax": true, + "Valid": true, + "ValidRune": true, + "ValidString": true, + }, + "unsafe": map[string]bool{ + "Alignof": true, + "ArbitraryType": true, + "Offsetof": true, + "Pointer": true, + "Sizeof": true, + }, +} diff --git a/vendor/golang.org/x/tools/internal/module/module.go b/vendor/golang.org/x/tools/internal/module/module.go new file mode 100644 index 00000000..9a4edb9d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/module/module.go @@ -0,0 +1,540 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type +// along with support code. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. + +import ( + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/internal/semver" +) + +// A Version is defined by a module path and version pair. +type Version struct { + Path string + + // Version is usually a semantic version in canonical form. + // There are two exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + Version string `json:",omitempty"` +} + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return fmt.Errorf("malformed semantic version %v", version) + } + _, pathMajor, _ := SplitPathVersion(path) + if !MatchPathMajor(version, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + if pathMajor[0] == '.' { // .v1 + pathMajor = pathMajor[1:] + } + return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor) + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' || + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// pathOK reports whether r can appear in an import path element. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// This matches what "go get" has historically recognized in import paths. +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see note below). +func pathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "safe encoding" below. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . 
/ : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + for i := 0; i < len(allowed); i++ { + if rune(allowed[i]) == r { + return true + } + } + return false + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +func CheckPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed module path %q: %v", path, err) + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("malformed module path %q: leading slash", path) + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("malformed module path %q: missing dot in first path element", path) + } + if path[0] == '-' { + return fmt.Errorf("malformed module path %q: leading dash in first path element", path) + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("malformed module path %q: invalid version", path) + } + return nil +} + +// CheckImportPath checks that an import path is valid. +func CheckImportPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed import path %q: %v", path, err) + } + return nil +} + +// checkPath checks that a general path is valid. +// It returns an error describing why but not mentioning path. +// Because these checks apply to both module paths and import paths, +// the caller is expected to add the "malformed ___ path %q: " prefix. +// fileName indicates whether the final element of the path is a file name +// (as opposed to a directory name). +func checkPath(path string, fileName bool) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if strings.Contains(path, "..") { + return fmt.Errorf("double dot") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], fileName); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], fileName); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +// fileName indicates whether the element is a file name (not a directory name). +func checkElem(elem string, fileName bool) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && !fileName { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' 
{ + return fmt.Errorf("trailing dot in path element") + } + charOK := pathOK + if fileName { + charOK = fileNameOK + } + for _, r := range elem { + if !charOK(r) { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("disallowed path element %q", elem) + } + } + return nil +} + +// CheckFilePath checks whether a slash-separated file path is valid. +func CheckFilePath(path string) error { + if err := checkPath(path, true); err != nil { + return fmt.Errorf("malformed file path %q: %v", path, err) + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' { + dot = true + } + i-- + } + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +func MatchPathMajor(v, pathMajor string) bool { + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. + // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. 
+ return true + } + m := semver.Major(v) + if pathMajor == "" { + return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" + } + return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:] +} + +// CanonicalVersion returns the canonical form of the version string v. +// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing Versions. +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// Safe encodings +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the safe encoding be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe encoding that +// leaves most paths unaltered. +// +// The safe encoding is this: +// replace every uppercase letter with an exclamation mark +// followed by the letter's lowercase equivalent. +// +// For example, +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the safe encoding is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to encode a literal !. +// +// Although paths are disallowed from using Unicode (see pathOK above), +// the eventual plan is to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention. 
Note however that not all runes that +// are different but case-fold equivalent are an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are considered to case-fold to each other. When we do add Unicode +// letters, we must not assume that upper/lower are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would encode as "!!k", or perhaps as "(212A)". +// +// Also, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. + +// EncodePath returns the safe encoding of the given module path. +// It fails if the module path is invalid. +func EncodePath(path string) (encoding string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return encodeString(path) +} + +// EncodeVersion returns the safe encoding of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EncodeVersion(v string) (encoding string, err error) { + if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { + return "", fmt.Errorf("disallowed version string %q", v) + } + return encodeString(v) +} + +func encodeString(s string) (encoding string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the encoding loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EncodePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// DecodePath returns the module path of the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid path. +func DecodePath(encoding string) (path string, err error) { + path, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid module path encoding %q", encoding) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) + } + return path, nil +} + +// DecodeVersion returns the version string for the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. 
+func DecodeVersion(encoding string) (v string, err error) { + v, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid version encoding %q", encoding) + } + if err := checkElem(v, true); err != nil { + return "", fmt.Errorf("disallowed version string %q", v) + } + return v, nil +} + +func decodeString(encoding string) (string, bool) { + var buf []byte + + bang := false + for _, r := range encoding { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go new file mode 100644 index 00000000..4af7118e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. 
+func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// according to semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." 
+ if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/gonum.org/v1/gonum/AUTHORS new file mode 100644 index 00000000..417db9b8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/AUTHORS @@ -0,0 +1,89 @@ +# This is the official list of gonum authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. 
+ +Alexander Egurnov +Bill Gray +Bill Noon +Brendan Tracey +Brent Pedersen +Chad Kunde +Chih-Wei Chang +Chris Tessum +Christophe Meessen +Clayton Northey +Dan Kortschak +Daniel Fireman +David Samborski +Davor Kapsa +DeepMind Technologies +Dezmond Goff +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Google Inc +Gustaf Johansson +Iakov Davydov +Igor Mikushkin +Iskander Sharipov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeff Juozapaitis +Jeremy Atkinson +Jonas Kahler +Jonas Schulze +Jonathan J Lawlor +Jonathan Schroeder +Joseph Watson +Josh Wilson +Julien Roland +Kai Trukenmüller +Kent English +Kevin C. Zimmerman +Kirill Motkov +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Martin Diz +Matthieu Di Mercurio +Max Halford +MinJae Kwon +Nick Potts +Olivier Wulveryck +Or Rikon +Pontus Melke +Renée French +Rishi Desai +Robin Eklind +Sam Zaydel +Samuel Kelemen +Saran Ahluwalia +Scott Holden +Sebastien Binet +Shawn Smith +source{d} +Spencer Lyon +Steve McCoy +Taesu Pyo +Takeshi Yoneda +The University of Adelaide +The University of Minnesota +The University of Washington +Thomas Berg +Tobin Harding +Vincent Thiery +Vladimír Chalupecký +Yevgeniy Vahlis diff --git a/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/gonum.org/v1/gonum/CONTRIBUTORS new file mode 100644 index 00000000..0c7cc46a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/CONTRIBUTORS @@ -0,0 +1,91 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the gonum +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees would be listed here +# but not in AUTHORS, because Google would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Alexander Egurnov +Andrew Brampton +Bill Gray +Bill Noon +Brendan Tracey +Brent Pedersen +Chad Kunde +Chih-Wei Chang +Chris Tessum +Christophe Meessen +Clayton Northey +Dan Kortschak +Daniel Fireman +David Samborski +Davor Kapsa +Dezmond Goff +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Gustaf Johansson +Iakov Davydov +Igor Mikushkin +Iskander Sharipov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeff Juozapaitis +Jeremy Atkinson +Jonas Kahler +Jonas Schulze +Jonathan J Lawlor +Jonathan Schroeder +Joseph Watson +Josh Wilson +Julien Roland +Kai Trukenmüller +Kent English +Kevin C. Zimmerman +Kirill Motkov +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Martin Diz +Matthieu Di Mercurio +Max Halford +MinJae Kwon +Nick Potts +Olivier Wulveryck +Or Rikon +Pontus Melke +Renée French +Rishi Desai +Robin Eklind +Sam Zaydel +Samuel Kelemen +Saran Ahluwalia +Scott Holden +Sebastien Binet +Shawn Smith +Spencer Lyon +Steve McCoy +Taesu Pyo +Takeshi Yoneda +Thomas Berg +Tobin Harding +Vincent Thiery +Vladimír Chalupecký +Yevgeniy Vahlis diff --git a/vendor/gonum.org/v1/gonum/LICENSE b/vendor/gonum.org/v1/gonum/LICENSE new file mode 100644 index 00000000..5f1c3f9c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/LICENSE @@ -0,0 +1,23 @@ +Copyright ©2013 The Gonum Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/blas/README.md b/vendor/gonum.org/v1/gonum/blas/README.md new file mode 100644 index 00000000..e9d33eee --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/README.md @@ -0,0 +1,47 @@ +# Gonum BLAS [![GoDoc](https://godoc.org/gonum.org/v1/gonum/blas?status.svg)](https://godoc.org/gonum.org/v1/gonum/blas) + +A collection of packages to provide BLAS functionality for the [Go programming +language](http://golang.org) + +## Installation +```sh + go get gonum.org/v1/gonum/blas/... +``` + +## Packages + +### blas + +Defines [BLAS API](http://www.netlib.org/blas/blast-forum/cinterface.pdf) split in several +interfaces. + +### blas/gonum + +Go implementation of the BLAS API (incomplete, implements the `float32` and `float64` API). + +### blas/blas64 and blas/blas32 + +Wrappers for an implementation of the double (i.e., `float64`) and single (`float32`) +precision real parts of the BLAS API. + +```Go +package main + +import ( + "fmt" + + "gonum.org/v1/gonum/blas/blas64" +) + +func main() { + v := blas64.Vector{Inc: 1, Data: []float64{1, 1, 1}} + fmt.Println("v has length:", blas64.Nrm2(len(v.Data), v)) +} +``` + +### blas/cblas128 and blas/cblas64 + +Wrappers for an implementation of the double (i.e., `complex128`) and single (`complex64`) +precision complex parts of the blas API. + +Currently blas/cblas64 and blas/cblas128 require gonum.org/v1/netlib/blas. diff --git a/vendor/gonum.org/v1/gonum/blas/blas.go b/vendor/gonum.org/v1/gonum/blas/blas.go new file mode 100644 index 00000000..9b933e3f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas.go @@ -0,0 +1,283 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate ./conversions.bash + +package blas + +// Flag constants indicate Givens transformation H matrix state. +type Flag int + +const ( + Identity Flag = -2 // H is the identity matrix; no rotation is needed. + Rescaling Flag = -1 // H specifies rescaling. 
+ OffDiagonal Flag = 0 // Off-diagonal elements of H are non-unit. + Diagonal Flag = 1 // Diagonal elements of H are non-unit. +) + +// SrotmParams contains Givens transformation parameters returned +// by the Float32 Srotm method. +type SrotmParams struct { + Flag + H [4]float32 // Column-major 2 by 2 matrix. +} + +// DrotmParams contains Givens transformation parameters returned +// by the Float64 Drotm method. +type DrotmParams struct { + Flag + H [4]float64 // Column-major 2 by 2 matrix. +} + +// Transpose specifies the transposition operation of a matrix. +type Transpose byte + +const ( + NoTrans Transpose = 'N' + Trans Transpose = 'T' + ConjTrans Transpose = 'C' +) + +// Uplo specifies whether a matrix is upper or lower triangular. +type Uplo byte + +const ( + Upper Uplo = 'U' + Lower Uplo = 'L' + All Uplo = 'A' +) + +// Diag specifies whether a matrix is unit triangular. +type Diag byte + +const ( + NonUnit Diag = 'N' + Unit Diag = 'U' +) + +// Side specifies from which side a multiplication operation is performed. +type Side byte + +const ( + Left Side = 'L' + Right Side = 'R' +) + +// Float32 implements the single precision real BLAS routines. +type Float32 interface { + Float32Level1 + Float32Level2 + Float32Level3 +} + +// Float32Level1 implements the single precision real BLAS Level 1 routines. +type Float32Level1 interface { + Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 + Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 + Sdot(n int, x []float32, incX int, y []float32, incY int) float32 + Snrm2(n int, x []float32, incX int) float32 + Sasum(n int, x []float32, incX int) float32 + Isamax(n int, x []float32, incX int) int + Sswap(n int, x []float32, incX int, y []float32, incY int) + Scopy(n int, x []float32, incX int, y []float32, incY int) + Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) + Srotg(a, b float32) (c, s, r, z float32) + Srotmg(d1, d2, b1, b2 float32) (p SrotmParams, rd1, rd2, rb1 float32) + Srot(n int, x []float32, incX int, y []float32, incY int, c, s float32) + Srotm(n int, x []float32, incX int, y []float32, incY int, p SrotmParams) + Sscal(n int, alpha float32, x []float32, incX int) +} + +// Float32Level2 implements the single precision real BLAS Level 2 routines. 
+type Float32Level2 interface { + Sgemv(tA Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Sgbmv(tA Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Strmv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) + Stbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) + Stpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) + Strsv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) + Stbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) + Stpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) + Ssymv(ul Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Ssbmv(ul Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Sspmv(ul Uplo, n int, alpha float32, ap []float32, x []float32, incX int, beta float32, y []float32, incY int) + Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) + Ssyr(ul Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) + Sspr(ul Uplo, n int, alpha float32, x []float32, incX int, ap []float32) + Ssyr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) + Sspr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32) +} + +// Float32Level3 implements the single precision real BLAS Level 3 routines. +type Float32Level3 interface { + Sgemm(tA, tB Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Ssymm(s Side, ul Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Ssyrk(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) + Ssyr2k(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Strmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) + Strsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) +} + +// Float64 implements the single precision real BLAS routines. +type Float64 interface { + Float64Level1 + Float64Level2 + Float64Level3 +} + +// Float64Level1 implements the double precision real BLAS Level 1 routines. 
+type Float64Level1 interface { + Ddot(n int, x []float64, incX int, y []float64, incY int) float64 + Dnrm2(n int, x []float64, incX int) float64 + Dasum(n int, x []float64, incX int) float64 + Idamax(n int, x []float64, incX int) int + Dswap(n int, x []float64, incX int, y []float64, incY int) + Dcopy(n int, x []float64, incX int, y []float64, incY int) + Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) + Drotg(a, b float64) (c, s, r, z float64) + Drotmg(d1, d2, b1, b2 float64) (p DrotmParams, rd1, rd2, rb1 float64) + Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) + Drotm(n int, x []float64, incX int, y []float64, incY int, p DrotmParams) + Dscal(n int, alpha float64, x []float64, incX int) +} + +// Float64Level2 implements the double precision real BLAS Level 2 routines. +type Float64Level2 interface { + Dgemv(tA Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dgbmv(tA Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dtrmv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) + Dtbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) + Dtpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) + Dtrsv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) + Dtbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) + Dtpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) + Dsymv(ul Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dsbmv(ul Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dspmv(ul Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) + Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) + Dsyr(ul Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) + Dspr(ul Uplo, n int, alpha float64, x []float64, incX int, ap []float64) + Dsyr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) + Dspr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64) +} + +// Float64Level3 implements the double precision real BLAS Level 3 routines. +type Float64Level3 interface { + Dgemm(tA, tB Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dsymm(s Side, ul Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dsyrk(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) + Dsyr2k(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dtrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) + Dtrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) +} + +// Complex64 implements the single precision complex BLAS routines. 
+type Complex64 interface { + Complex64Level1 + Complex64Level2 + Complex64Level3 +} + +// Complex64Level1 implements the single precision complex BLAS Level 1 routines. +type Complex64Level1 interface { + Cdotu(n int, x []complex64, incX int, y []complex64, incY int) (dotu complex64) + Cdotc(n int, x []complex64, incX int, y []complex64, incY int) (dotc complex64) + Scnrm2(n int, x []complex64, incX int) float32 + Scasum(n int, x []complex64, incX int) float32 + Icamax(n int, x []complex64, incX int) int + Cswap(n int, x []complex64, incX int, y []complex64, incY int) + Ccopy(n int, x []complex64, incX int, y []complex64, incY int) + Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) + Cscal(n int, alpha complex64, x []complex64, incX int) + Csscal(n int, alpha float32, x []complex64, incX int) +} + +// Complex64Level2 implements the single precision complex BLAS routines Level 2 routines. +type Complex64Level2 interface { + Cgemv(tA Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Cgbmv(tA Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Ctrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) + Ctbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) + Ctpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) + Ctrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) + Ctbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) + Ctpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) + Chemv(ul Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Chbmv(ul Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Chpmv(ul Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) + Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Cher(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) + Chpr(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64) + Cher2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Chpr2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) +} + +// Complex64Level3 implements the single precision complex BLAS Level 3 routines. 
+type Complex64Level3 interface { + Cgemm(tA, tB Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Csymm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Csyrk(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) + Csyr2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Ctrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) + Ctrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) + Chemm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Cherk(ul Uplo, t Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) + Cher2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) +} + +// Complex128 implements the double precision complex BLAS routines. +type Complex128 interface { + Complex128Level1 + Complex128Level2 + Complex128Level3 +} + +// Complex128Level1 implements the double precision complex BLAS Level 1 routines. +type Complex128Level1 interface { + Zdotu(n int, x []complex128, incX int, y []complex128, incY int) (dotu complex128) + Zdotc(n int, x []complex128, incX int, y []complex128, incY int) (dotc complex128) + Dznrm2(n int, x []complex128, incX int) float64 + Dzasum(n int, x []complex128, incX int) float64 + Izamax(n int, x []complex128, incX int) int + Zswap(n int, x []complex128, incX int, y []complex128, incY int) + Zcopy(n int, x []complex128, incX int, y []complex128, incY int) + Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) + Zscal(n int, alpha complex128, x []complex128, incX int) + Zdscal(n int, alpha float64, x []complex128, incX int) +} + +// Complex128Level2 implements the double precision complex BLAS Level 2 routines. 
+type Complex128Level2 interface { + Zgemv(tA Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zgbmv(tA Transpose, m, n int, kL int, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Ztrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) + Ztbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) + Ztpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) + Ztrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) + Ztbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) + Ztpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) + Zhemv(ul Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zhbmv(ul Uplo, n, k int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zhpmv(ul Uplo, n int, alpha complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zher(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) + Zhpr(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128) + Zher2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zhpr2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) +} + +// Complex128Level3 implements the double precision complex BLAS Level 3 routines. 
+type Complex128Level3 interface { + Zgemm(tA, tB Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zsymm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zsyrk(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) + Zsyr2k(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Ztrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) + Ztrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) + Zhemm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zherk(ul Uplo, t Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) + Zher2k(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go b/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go new file mode 100644 index 00000000..55198383 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go @@ -0,0 +1,469 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/gonum" +) + +var blas64 blas.Float64 = gonum.Implementation{} + +// Use sets the BLAS float64 implementation to be used by subsequent BLAS calls. +// The default implementation is +// gonum.org/v1/gonum/blas/gonum.Implementation. +func Use(b blas.Float64) { + blas64 = b +} + +// Implementation returns the current BLAS float64 implementation. +// +// Implementation allows direct calls to the current the BLAS float64 implementation +// giving finer control of parameters. +func Implementation() blas.Float64 { + return blas64 +} + +// Vector represents a vector with an associated element increment. +type Vector struct { + N int + Data []float64 + Inc int +} + +// General represents a matrix using the conventional storage scheme. +type General struct { + Rows, Cols int + Data []float64 + Stride int +} + +// Band represents a band matrix using the band storage scheme. +type Band struct { + Rows, Cols int + KL, KU int + Data []float64 + Stride int +} + +// Triangular represents a triangular matrix using the conventional storage scheme. +type Triangular struct { + Uplo blas.Uplo + Diag blas.Diag + N int + Data []float64 + Stride int +} + +// TriangularBand represents a triangular matrix using the band storage scheme. +type TriangularBand struct { + Uplo blas.Uplo + Diag blas.Diag + N, K int + Data []float64 + Stride int +} + +// TriangularPacked represents a triangular matrix using the packed storage scheme. +type TriangularPacked struct { + Uplo blas.Uplo + Diag blas.Diag + N int + Data []float64 +} + +// Symmetric represents a symmetric matrix using the conventional storage scheme. +type Symmetric struct { + Uplo blas.Uplo + N int + Data []float64 + Stride int +} + +// SymmetricBand represents a symmetric matrix using the band storage scheme. 
+type SymmetricBand struct { + Uplo blas.Uplo + N, K int + Data []float64 + Stride int +} + +// SymmetricPacked represents a symmetric matrix using the packed storage scheme. +type SymmetricPacked struct { + Uplo blas.Uplo + N int + Data []float64 +} + +// Level 1 + +const ( + negInc = "blas64: negative vector increment" + badLength = "blas64: vector length mismatch" +) + +// Dot computes the dot product of the two vectors: +// \sum_i x[i]*y[i]. +func Dot(x, y Vector) float64 { + if x.N != y.N { + panic(badLength) + } + return blas64.Ddot(x.N, x.Data, x.Inc, y.Data, y.Inc) +} + +// Nrm2 computes the Euclidean norm of the vector x: +// sqrt(\sum_i x[i]*x[i]). +// +// Nrm2 will panic if the vector increment is negative. +func Nrm2(x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Dnrm2(x.N, x.Data, x.Inc) +} + +// Asum computes the sum of the absolute values of the elements of x: +// \sum_i |x[i]|. +// +// Asum will panic if the vector increment is negative. +func Asum(x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Dasum(x.N, x.Data, x.Inc) +} + +// Iamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Iamax returns -1 if n == 0. +// +// Iamax will panic if the vector increment is negative. +func Iamax(x Vector) int { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Idamax(x.N, x.Data, x.Inc) +} + +// Swap exchanges the elements of the two vectors: +// x[i], y[i] = y[i], x[i] for all i. +func Swap(x, y Vector) { + if x.N != y.N { + panic(badLength) + } + blas64.Dswap(x.N, x.Data, x.Inc, y.Data, y.Inc) +} + +// Copy copies the elements of x into the elements of y: +// y[i] = x[i] for all i. +// Copy requires that the lengths of x and y match and will panic otherwise. +func Copy(x, y Vector) { + if x.N != y.N { + panic(badLength) + } + blas64.Dcopy(x.N, x.Data, x.Inc, y.Data, y.Inc) +} + +// Axpy adds x scaled by alpha to y: +// y[i] += alpha*x[i] for all i. +func Axpy(alpha float64, x, y Vector) { + if x.N != y.N { + panic(badLength) + } + blas64.Daxpy(x.N, alpha, x.Data, x.Inc, y.Data, y.Inc) +} + +// Rotg computes the parameters of a Givens plane rotation so that +// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ +// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ +// where a and b are the Cartesian coordinates of a given point. +// c, s, and r are defined as +// r = ±Sqrt(a^2 + b^2), +// c = a/r, the cosine of the rotation angle, +// s = a/r, the sine of the rotation angle, +// and z is defined such that +// if |a| > |b|, z = s, +// otherwise if c != 0, z = 1/c, +// otherwise z = 1. +func Rotg(a, b float64) (c, s, r, z float64) { + return blas64.Drotg(a, b) +} + +// Rotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +func Rotmg(d1, d2, b1, b2 float64) (p blas.DrotmParams, rd1, rd2, rb1 float64) { + return blas64.Drotmg(d1, d2, b1, b2) +} + +// Rot applies a plane transformation to n points represented by the vectors x +// and y: +// x[i] = c*x[i] + s*y[i], +// y[i] = -s*x[i] + c*y[i], for all i. +func Rot(x, y Vector, c, s float64) { + if x.N != y.N { + panic(badLength) + } + blas64.Drot(x.N, x.Data, x.Inc, y.Data, y.Inc, c, s) +} + +// Rotm applies the modified Givens rotation to n points represented by the +// vectors x and y. 
+func Rotm(x, y Vector, p blas.DrotmParams) { + if x.N != y.N { + panic(badLength) + } + blas64.Drotm(x.N, x.Data, x.Inc, y.Data, y.Inc, p) +} + +// Scal scales the vector x by alpha: +// x[i] *= alpha for all i. +// +// Scal will panic if the vector increment is negative. +func Scal(alpha float64, x Vector) { + if x.Inc < 0 { + panic(negInc) + } + blas64.Dscal(x.N, alpha, x.Data, x.Inc) +} + +// Level 2 + +// Gemv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func Gemv(t blas.Transpose, alpha float64, a General, x Vector, beta float64, y Vector) { + blas64.Dgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Gbmv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, +// where A is an m×n band matrix, x and y are vectors, and alpha and beta are scalars. +func Gbmv(t blas.Transpose, alpha float64, a Band, x Vector, beta float64, y Vector) { + blas64.Dgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Trmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix, and x is a vector. +func Trmv(t blas.Transpose, a Triangular, x Vector) { + blas64.Dtrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular band matrix, and x is a vector. +func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { + blas64.Dtbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x is a vector. +func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { + blas64.Dtpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Trsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Trsv(t blas.Transpose, a Triangular, x Vector) { + blas64.Dtrsv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular band matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { + blas64.Dtbsv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x and b are +// vectors. 
+// +// At entry to the function, x contains the values of b, and the result is +// stored in place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { + blas64.Dtpsv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Symv computes +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +func Symv(alpha float64, a Symmetric, x Vector, beta float64, y Vector) { + blas64.Dsymv(a.Uplo, a.N, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Sbmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric band matrix, x and y are vectors, and alpha +// and beta are scalars. +func Sbmv(alpha float64, a SymmetricBand, x Vector, beta float64, y Vector) { + blas64.Dsbmv(a.Uplo, a.N, a.K, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Spmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +func Spmv(alpha float64, a SymmetricPacked, x Vector, beta float64, y Vector) { + blas64.Dspmv(a.Uplo, a.N, alpha, a.Data, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Ger performs a rank-1 update +// A += alpha * x * y^T, +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(alpha float64, x, y Vector, a General) { + blas64.Dger(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Syr performs a rank-1 update +// A += alpha * x * x^T, +// where A is an n×n symmetric matrix, x is a vector, and alpha is a scalar. +func Syr(alpha float64, x Vector, a Symmetric) { + blas64.Dsyr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) +} + +// Spr performs the rank-1 update +// A += alpha * x * x^T, +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +func Spr(alpha float64, x Vector, a SymmetricPacked) { + blas64.Dspr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data) +} + +// Syr2 performs a rank-2 update +// A += alpha * x * y^T + alpha * y * x^T, +// where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. +func Syr2(alpha float64, x, y Vector, a Symmetric) { + blas64.Dsyr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Spr2 performs a rank-2 update +// A += alpha * x * y^T + alpha * y * x^T, +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func Spr2(alpha float64, x, y Vector, a SymmetricPacked) { + blas64.Dspr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data) +} + +// Level 3 + +// Gemm computes +// C = alpha * A * B + beta * C, +// where A, B, and C are dense matrices, and alpha and beta are scalars. +// tA and tB specify whether A or B are transposed. 
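+//
+// A minimal illustrative sketch (values chosen only for the example),
+// computing C = A * B for two 2×2 matrices:
+//	a := General{Rows: 2, Cols: 2, Stride: 2, Data: []float64{1, 2, 3, 4}}
+//	b := General{Rows: 2, Cols: 2, Stride: 2, Data: []float64{5, 6, 7, 8}}
+//	c := General{Rows: 2, Cols: 2, Stride: 2, Data: make([]float64, 4)}
+//	Gemm(blas.NoTrans, blas.NoTrans, 1, a, b, 0, c)
+//	// c.Data is now [19 22 43 50].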
+func Gemm(tA, tB blas.Transpose, alpha float64, a, b General, beta float64, c General) { + var m, n, k int + if tA == blas.NoTrans { + m, k = a.Rows, a.Cols + } else { + m, k = a.Cols, a.Rows + } + if tB == blas.NoTrans { + n = b.Cols + } else { + n = b.Rows + } + blas64.Dgemm(tA, tB, m, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Symm performs +// C = alpha * A * B + beta * C, if s == blas.Left, +// C = alpha * B * A + beta * C, if s == blas.Right, +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and +// alpha is a scalar. +func Symm(s blas.Side, alpha float64, a Symmetric, b General, beta float64, c General) { + var m, n int + if s == blas.Left { + m, n = a.N, b.Cols + } else { + m, n = b.Rows, a.N + } + blas64.Dsymm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Syrk performs a symmetric rank-k update +// C = alpha * A * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, +// where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans and +// a k×n matrix otherwise, and alpha and beta are scalars. +func Syrk(t blas.Transpose, alpha float64, a General, beta float64, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + blas64.Dsyrk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) +} + +// Syr2k performs a symmetric rank-2k update +// C = alpha * A * B^T + alpha * B * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * B + alpha * B^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, +// where C is an n×n symmetric matrix, A and B are n×k matrices if t == NoTrans +// and k×n matrices otherwise, and alpha and beta are scalars. +func Syr2k(t blas.Transpose, alpha float64, a, b General, beta float64, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + blas64.Dsyr2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Trmm performs +// B = alpha * A * B, if tA == blas.NoTrans and s == blas.Left, +// B = alpha * A^T * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// B = alpha * B * A, if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * A^T, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is +// a scalar. +func Trmm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { + blas64.Dtrmm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} + +// Trsm solves +// A * X = alpha * B, if tA == blas.NoTrans and s == blas.Left, +// A^T * X = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// X * A = alpha * B, if tA == blas.NoTrans and s == blas.Right, +// X * A^T = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and +// alpha is a scalar. +// +// At entry to the function, X contains the values of B, and the result is +// stored in-place into X. +// +// No check is made that A is invertible. 
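+//
+// A minimal illustrative sketch (values chosen only for the example), solving
+// A * X = B in place for an upper triangular A:
+//	a := Triangular{Uplo: blas.Upper, Diag: blas.NonUnit, N: 2, Stride: 2, Data: []float64{2, 1, 0, 1}}
+//	b := General{Rows: 2, Cols: 1, Stride: 1, Data: []float64{5, 1}}
+//	Trsm(blas.Left, blas.NoTrans, 1, a, b)
+//	// b.Data now holds the solution X = [2 1].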
+func Trsm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { + blas64.Dtrsm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go new file mode 100644 index 00000000..882fd8a7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go @@ -0,0 +1,277 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import "gonum.org/v1/gonum/blas" + +// GeneralCols represents a matrix using the conventional column-major storage scheme. +type GeneralCols General + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t GeneralCols) From(a General) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if len(t.Data) < (t.Cols-1)*t.Stride+t.Rows { + panic("blas64: short data slice") + } + for i := 0; i < a.Rows; i++ { + for j, v := range a.Data[i*a.Stride : i*a.Stride+a.Cols] { + t.Data[i+j*t.Stride] = v + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t General) From(a GeneralCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if len(t.Data) < (t.Rows-1)*t.Stride+t.Cols { + panic("blas64: short data slice") + } + for j := 0; j < a.Cols; j++ { + for i, v := range a.Data[j*a.Stride : j*a.Stride+a.Rows] { + t.Data[i*t.Stride+j] = v + } + } +} + +// TriangularCols represents a matrix using the conventional column-major storage scheme. +type TriangularCols Triangular + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. +func (t TriangularCols) From(a Triangular) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. 
+func (t Triangular) From(a TriangularCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// BandCols represents a matrix using the band column-major storage scheme. +type BandCols Band + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. +func (t BandCols) From(a Band) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("blas64: short stride for destination") + } + for i := 0; i < a.Rows; i++ { + for j := max(0, i-a.KL); j < min(i+a.KU+1, a.Cols); j++ { + t.Data[i+t.KU-j+j*t.Stride] = a.Data[j+a.KL-i+i*a.Stride] + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. +func (t Band) From(a BandCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("blas64: short stride for destination") + } + for j := 0; j < a.Cols; j++ { + for i := max(0, j-a.KU); i < min(j+a.KL+1, a.Rows); i++ { + t.Data[j+a.KL-i+i*a.Stride] = a.Data[i+t.KU-j+j*t.Stride] + } + } +} + +// TriangularBandCols represents a symmetric matrix using the band column-major storage scheme. +type TriangularBandCols TriangularBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t TriangularBandCols) From(a TriangularBand) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
+func (t TriangularBand) From(a TriangularBandCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go new file mode 100644 index 00000000..5146f1a1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go @@ -0,0 +1,153 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import "gonum.org/v1/gonum/blas" + +// SymmetricCols represents a matrix using the conventional column-major storage scheme. +type SymmetricCols Symmetric + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t SymmetricCols) From(a Symmetric) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t Symmetric) From(a SymmetricCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// SymmetricBandCols represents a symmetric matrix using the band column-major storage scheme. +type SymmetricBandCols SymmetricBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
+func (t SymmetricBandCols) From(a SymmetricBand) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t SymmetricBand) From(a SymmetricBandCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/doc.go b/vendor/gonum.org/v1/gonum/blas/blas64/doc.go new file mode 100644 index 00000000..7410cee4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blas64 provides a simple interface to the float64 BLAS API. +package blas64 // import "gonum.org/v1/gonum/blas/blas64" diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go b/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go new file mode 100644 index 00000000..1205da8a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go @@ -0,0 +1,508 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/gonum" +) + +var cblas128 blas.Complex128 = gonum.Implementation{} + +// Use sets the BLAS complex128 implementation to be used by subsequent BLAS calls. +// The default implementation is +// gonum.org/v1/gonum/blas/gonum.Implementation. +func Use(b blas.Complex128) { + cblas128 = b +} + +// Implementation returns the current BLAS complex128 implementation. +// +// Implementation allows direct calls to the current the BLAS complex128 implementation +// giving finer control of parameters. +func Implementation() blas.Complex128 { + return cblas128 +} + +// Vector represents a vector with an associated element increment. +type Vector struct { + Inc int + Data []complex128 +} + +// General represents a matrix using the conventional storage scheme. 
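+// Element (i, j) of the matrix is stored at Data[i*Stride+j] (row-major
+// order, as in the blas64 General type).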
+type General struct { + Rows, Cols int + Stride int + Data []complex128 +} + +// Band represents a band matrix using the band storage scheme. +type Band struct { + Rows, Cols int + KL, KU int + Stride int + Data []complex128 +} + +// Triangular represents a triangular matrix using the conventional storage scheme. +type Triangular struct { + N int + Stride int + Data []complex128 + Uplo blas.Uplo + Diag blas.Diag +} + +// TriangularBand represents a triangular matrix using the band storage scheme. +type TriangularBand struct { + N, K int + Stride int + Data []complex128 + Uplo blas.Uplo + Diag blas.Diag +} + +// TriangularPacked represents a triangular matrix using the packed storage scheme. +type TriangularPacked struct { + N int + Data []complex128 + Uplo blas.Uplo + Diag blas.Diag +} + +// Symmetric represents a symmetric matrix using the conventional storage scheme. +type Symmetric struct { + N int + Stride int + Data []complex128 + Uplo blas.Uplo +} + +// SymmetricBand represents a symmetric matrix using the band storage scheme. +type SymmetricBand struct { + N, K int + Stride int + Data []complex128 + Uplo blas.Uplo +} + +// SymmetricPacked represents a symmetric matrix using the packed storage scheme. +type SymmetricPacked struct { + N int + Data []complex128 + Uplo blas.Uplo +} + +// Hermitian represents an Hermitian matrix using the conventional storage scheme. +type Hermitian Symmetric + +// HermitianBand represents an Hermitian matrix using the band storage scheme. +type HermitianBand SymmetricBand + +// HermitianPacked represents an Hermitian matrix using the packed storage scheme. +type HermitianPacked SymmetricPacked + +// Level 1 + +const negInc = "cblas128: negative vector increment" + +// Dotu computes the dot product of the two vectors without +// complex conjugation: +// x^T * y. +func Dotu(n int, x, y Vector) complex128 { + return cblas128.Zdotu(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Dotc computes the dot product of the two vectors with +// complex conjugation: +// x^H * y. +func Dotc(n int, x, y Vector) complex128 { + return cblas128.Zdotc(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Nrm2 computes the Euclidean norm of the vector x: +// sqrt(\sum_i x[i] * x[i]). +// +// Nrm2 will panic if the vector increment is negative. +func Nrm2(n int, x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return cblas128.Dznrm2(n, x.Data, x.Inc) +} + +// Asum computes the sum of magnitudes of the real and imaginary parts of +// elements of the vector x: +// \sum_i (|Re x[i]| + |Im x[i]|). +// +// Asum will panic if the vector increment is negative. +func Asum(n int, x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return cblas128.Dzasum(n, x.Data, x.Inc) +} + +// Iamax returns the index of an element of x with the largest sum of +// magnitudes of the real and imaginary parts (|Re x[i]|+|Im x[i]|). +// If there are multiple such indices, the earliest is returned. +// +// Iamax returns -1 if n == 0. +// +// Iamax will panic if the vector increment is negative. +func Iamax(n int, x Vector) int { + if x.Inc < 0 { + panic(negInc) + } + return cblas128.Izamax(n, x.Data, x.Inc) +} + +// Swap exchanges the elements of two vectors: +// x[i], y[i] = y[i], x[i] for all i. +func Swap(n int, x, y Vector) { + cblas128.Zswap(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Copy copies the elements of x into the elements of y: +// y[i] = x[i] for all i. 
+func Copy(n int, x, y Vector) { + cblas128.Zcopy(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Axpy computes +// y = alpha * x + y, +// where x and y are vectors, and alpha is a scalar. +func Axpy(n int, alpha complex128, x, y Vector) { + cblas128.Zaxpy(n, alpha, x.Data, x.Inc, y.Data, y.Inc) +} + +// Scal computes +// x = alpha * x, +// where x is a vector, and alpha is a scalar. +// +// Scal will panic if the vector increment is negative. +func Scal(n int, alpha complex128, x Vector) { + if x.Inc < 0 { + panic(negInc) + } + cblas128.Zscal(n, alpha, x.Data, x.Inc) +} + +// Dscal computes +// x = alpha * x, +// where x is a vector, and alpha is a real scalar. +// +// Dscal will panic if the vector increment is negative. +func Dscal(n int, alpha float64, x Vector) { + if x.Inc < 0 { + panic(negInc) + } + cblas128.Zdscal(n, alpha, x.Data, x.Inc) +} + +// Level 2 + +// Gemv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans, +// y = alpha * A^H * x + beta * y, if t == blas.ConjTrans, +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are +// scalars. +func Gemv(t blas.Transpose, alpha complex128, a General, x Vector, beta complex128, y Vector) { + cblas128.Zgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Gbmv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans, +// y = alpha * A^H * x + beta * y, if t == blas.ConjTrans, +// where A is an m×n band matrix, x and y are vectors, and alpha and beta are +// scalars. +func Gbmv(t blas.Transpose, alpha complex128, a Band, x Vector, beta complex128, y Vector) { + cblas128.Zgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Trmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans, +// x = A^H * x, if t == blas.ConjTrans, +// where A is an n×n triangular matrix, and x is a vector. +func Trmv(t blas.Transpose, a Triangular, x Vector) { + cblas128.Ztrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans, +// x = A^H * x, if t == blas.ConjTrans, +// where A is an n×n triangular band matrix, and x is a vector. +func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { + cblas128.Ztbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans, +// x = A^H * x, if t == blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x is a vector. +func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { + cblas128.Ztpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Trsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans, +// A^H * x = b, if t == blas.ConjTrans, +// where A is an n×n triangular matrix and x is a vector. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
+func Trsv(t blas.Transpose, a Triangular, x Vector) { + cblas128.Ztrsv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans, +// A^H * x = b, if t == blas.ConjTrans, +// where A is an n×n triangular band matrix, and x is a vector. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { + cblas128.Ztbsv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans, +// A^H * x = b, if t == blas.ConjTrans, +// where A is an n×n triangular matrix in packed format and x is a vector. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { + cblas128.Ztpsv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Hemv computes +// y = alpha * A * x + beta * y, +// where A is an n×n Hermitian matrix, x and y are vectors, and alpha and +// beta are scalars. +func Hemv(alpha complex128, a Hermitian, x Vector, beta complex128, y Vector) { + cblas128.Zhemv(a.Uplo, a.N, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Hbmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n Hermitian band matrix, x and y are vectors, and alpha +// and beta are scalars. +func Hbmv(alpha complex128, a HermitianBand, x Vector, beta complex128, y Vector) { + cblas128.Zhbmv(a.Uplo, a.N, a.K, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Hpmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n Hermitian matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +func Hpmv(alpha complex128, a HermitianPacked, x Vector, beta complex128, y Vector) { + cblas128.Zhpmv(a.Uplo, a.N, alpha, a.Data, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Geru performs a rank-1 update +// A += alpha * x * y^T, +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Geru(alpha complex128, x, y Vector, a General) { + cblas128.Zgeru(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Gerc performs a rank-1 update +// A += alpha * x * y^H, +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Gerc(alpha complex128, x, y Vector, a General) { + cblas128.Zgerc(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Her performs a rank-1 update +// A += alpha * x * y^T, +// where A is an m×n Hermitian matrix, x and y are vectors, and alpha is a scalar. +func Her(alpha float64, x Vector, a Hermitian) { + cblas128.Zher(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) +} + +// Hpr performs a rank-1 update +// A += alpha * x * x^H, +// where A is an n×n Hermitian matrix in packed format, x is a vector, and +// alpha is a scalar. 
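To make the Level 2 calling convention concrete, a small sketch of Gemv as declared above (row-major General matrix, unit-increment vectors); the result in the comment follows from y = alpha * A * x + beta * y with the values chosen here.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/cblas128"
)

func main() {
	// y = 1*A*x + 0*y for the 2×2 row-major matrix A = [[1, 2], [3, 4]].
	a := cblas128.General{Rows: 2, Cols: 2, Stride: 2, Data: []complex128{1, 2, 3, 4}}
	x := cblas128.Vector{Inc: 1, Data: []complex128{1, 1}}
	y := cblas128.Vector{Inc: 1, Data: make([]complex128, 2)}
	cblas128.Gemv(blas.NoTrans, 1, a, x, 0, y)
	fmt.Println(y.Data) // [(3+0i) (7+0i)]
}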
+func Hpr(alpha float64, x Vector, a HermitianPacked) { + cblas128.Zhpr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data) +} + +// Her2 performs a rank-2 update +// A += alpha * x * y^H + conj(alpha) * y * x^H, +// where A is an n×n Hermitian matrix, x and y are vectors, and alpha is a scalar. +func Her2(alpha complex128, x, y Vector, a Hermitian) { + cblas128.Zher2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Hpr2 performs a rank-2 update +// A += alpha * x * y^H + conj(alpha) * y * x^H, +// where A is an n×n Hermitian matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func Hpr2(alpha complex128, x, y Vector, a HermitianPacked) { + cblas128.Zhpr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data) +} + +// Level 3 + +// Gemm computes +// C = alpha * A * B + beta * C, +// where A, B, and C are dense matrices, and alpha and beta are scalars. +// tA and tB specify whether A or B are transposed or conjugated. +func Gemm(tA, tB blas.Transpose, alpha complex128, a, b General, beta complex128, c General) { + var m, n, k int + if tA == blas.NoTrans { + m, k = a.Rows, a.Cols + } else { + m, k = a.Cols, a.Rows + } + if tB == blas.NoTrans { + n = b.Cols + } else { + n = b.Rows + } + cblas128.Zgemm(tA, tB, m, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Symm performs +// C = alpha * A * B + beta * C, if s == blas.Left, +// C = alpha * B * A + beta * C, if s == blas.Right, +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and +// alpha and beta are scalars. +func Symm(s blas.Side, alpha complex128, a Symmetric, b General, beta complex128, c General) { + var m, n int + if s == blas.Left { + m, n = a.N, b.Cols + } else { + m, n = b.Rows, a.N + } + cblas128.Zsymm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Syrk performs a symmetric rank-k update +// C = alpha * A * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * A + beta * C, if t == blas.Trans, +// where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans +// and a k×n matrix otherwise, and alpha and beta are scalars. +func Syrk(t blas.Transpose, alpha complex128, a General, beta complex128, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zsyrk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) +} + +// Syr2k performs a symmetric rank-2k update +// C = alpha * A * B^T + alpha * B * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * B + alpha * B^T * A + beta * C, if t == blas.Trans, +// where C is an n×n symmetric matrix, A and B are n×k matrices if +// t == blas.NoTrans and k×n otherwise, and alpha and beta are scalars. 
+func Syr2k(t blas.Transpose, alpha complex128, a, b General, beta complex128, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zsyr2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Trmm performs +// B = alpha * A * B, if tA == blas.NoTrans and s == blas.Left, +// B = alpha * A^T * B, if tA == blas.Trans and s == blas.Left, +// B = alpha * A^H * B, if tA == blas.ConjTrans and s == blas.Left, +// B = alpha * B * A, if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * A^T, if tA == blas.Trans and s == blas.Right, +// B = alpha * B * A^H, if tA == blas.ConjTrans and s == blas.Right, +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is +// a scalar. +func Trmm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b General) { + cblas128.Ztrmm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} + +// Trsm solves +// A * X = alpha * B, if tA == blas.NoTrans and s == blas.Left, +// A^T * X = alpha * B, if tA == blas.Trans and s == blas.Left, +// A^H * X = alpha * B, if tA == blas.ConjTrans and s == blas.Left, +// X * A = alpha * B, if tA == blas.NoTrans and s == blas.Right, +// X * A^T = alpha * B, if tA == blas.Trans and s == blas.Right, +// X * A^H = alpha * B, if tA == blas.ConjTrans and s == blas.Right, +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and +// alpha is a scalar. +// +// At entry to the function, b contains the values of B, and the result is +// stored in-place into b. +// +// No check is made that A is invertible. +func Trsm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b General) { + cblas128.Ztrsm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} + +// Hemm performs +// C = alpha * A * B + beta * C, if s == blas.Left, +// C = alpha * B * A + beta * C, if s == blas.Right, +// where A is an n×n or m×m Hermitian matrix, B and C are m×n matrices, and +// alpha and beta are scalars. +func Hemm(s blas.Side, alpha complex128, a Hermitian, b General, beta complex128, c General) { + var m, n int + if s == blas.Left { + m, n = a.N, b.Cols + } else { + m, n = b.Rows, a.N + } + cblas128.Zhemm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Herk performs the Hermitian rank-k update +// C = alpha * A * A^H + beta*C, if t == blas.NoTrans, +// C = alpha * A^H * A + beta*C, if t == blas.ConjTrans, +// where C is an n×n Hermitian matrix, A is an n×k matrix if t == blas.NoTrans +// and a k×n matrix otherwise, and alpha and beta are scalars. +func Herk(t blas.Transpose, alpha float64, a General, beta float64, c Hermitian) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zherk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) +} + +// Her2k performs the Hermitian rank-2k update +// C = alpha * A * B^H + conj(alpha) * B * A^H + beta * C, if t == blas.NoTrans, +// C = alpha * A^H * B + conj(alpha) * B^H * A + beta * C, if t == blas.ConjTrans, +// where C is an n×n Hermitian matrix, A and B are n×k matrices if t == NoTrans +// and k×n matrices otherwise, and alpha and beta are scalars. 
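Similarly, a hedged sketch of the Level 3 entry point Gemm defined above; the expected output in the comment follows from C = alpha * A * B + beta * C with the small matrices used here.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/cblas128"
)

func main() {
	// C = 1*A*B + 0*C for row-major A = [[1, 2], [3, 4]] and B = [[0, i], [1, 0]].
	a := cblas128.General{Rows: 2, Cols: 2, Stride: 2, Data: []complex128{1, 2, 3, 4}}
	b := cblas128.General{Rows: 2, Cols: 2, Stride: 2, Data: []complex128{0, 1i, 1, 0}}
	c := cblas128.General{Rows: 2, Cols: 2, Stride: 2, Data: make([]complex128, 4)}
	cblas128.Gemm(blas.NoTrans, blas.NoTrans, 1, a, b, 0, c)
	fmt.Println(c.Data) // [(2+0i) (0+1i) (4+0i) (0+3i)]
}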
+func Her2k(t blas.Transpose, alpha complex128, a, b General, beta float64, c Hermitian) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zher2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go new file mode 100644 index 00000000..93e3cd2f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go @@ -0,0 +1,279 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import "gonum.org/v1/gonum/blas" + +// GeneralCols represents a matrix using the conventional column-major storage scheme. +type GeneralCols General + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t GeneralCols) From(a General) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if len(t.Data) < (t.Cols-1)*t.Stride+t.Rows { + panic("cblas128: short data slice") + } + for i := 0; i < a.Rows; i++ { + for j, v := range a.Data[i*a.Stride : i*a.Stride+a.Cols] { + t.Data[i+j*t.Stride] = v + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t General) From(a GeneralCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if len(t.Data) < (t.Rows-1)*t.Stride+t.Cols { + panic("cblas128: short data slice") + } + for j := 0; j < a.Cols; j++ { + for i, v := range a.Data[j*a.Stride : j*a.Stride+a.Rows] { + t.Data[i*t.Stride+j] = v + } + } +} + +// TriangularCols represents a matrix using the conventional column-major storage scheme. +type TriangularCols Triangular + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. +func (t TriangularCols) From(a Triangular) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. 
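The row-major/column-major conversions above are easiest to see on a small rectangular matrix; a minimal sketch, with the column-major result worked out by hand in the comment.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/cblas128"
)

func main() {
	// The 2×3 row-major matrix [[1, 2, 3], [4, 5, 6]].
	a := cblas128.General{Rows: 2, Cols: 3, Stride: 3, Data: []complex128{1, 2, 3, 4, 5, 6}}
	// Column-major destination: the stride is now the number of rows.
	t := cblas128.GeneralCols{Rows: 2, Cols: 3, Stride: 2, Data: make([]complex128, 6)}
	t.From(a)
	fmt.Println(t.Data) // [(1+0i) (4+0i) (2+0i) (5+0i) (3+0i) (6+0i)]
}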
+func (t Triangular) From(a TriangularCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// BandCols represents a matrix using the band column-major storage scheme. +type BandCols Band + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. +func (t BandCols) From(a Band) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("cblas128: short stride for destination") + } + for i := 0; i < a.Rows; i++ { + for j := max(0, i-a.KL); j < min(i+a.KU+1, a.Cols); j++ { + t.Data[i+t.KU-j+j*t.Stride] = a.Data[j+a.KL-i+i*a.Stride] + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. +func (t Band) From(a BandCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("cblas128: short stride for destination") + } + for j := 0; j < a.Cols; j++ { + for i := max(0, j-a.KU); i < min(j+a.KL+1, a.Rows); i++ { + t.Data[j+a.KL-i+i*a.Stride] = a.Data[i+t.KU-j+j*t.Stride] + } + } +} + +// TriangularBandCols represents a symmetric matrix using the band column-major storage scheme. +type TriangularBandCols TriangularBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t TriangularBandCols) From(a TriangularBand) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
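Band storage is the least obvious of these layouts, so a small sketch of the BandCols conversion above may help; the entries written as 0 are padding positions the band format never reads, and the expected output is derived from the index expressions in BandCols.From.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/cblas128"
)

func main() {
	// The tridiagonal 3×3 matrix
	//   [ 1 2 0 ]
	//   [ 3 4 5 ]
	//   [ 0 6 7 ]
	// in row-major band storage with KL = KU = 1; element (i, j) sits at
	// Data[i*Stride + (j-i+KL)], so the first and last slots are padding.
	a := cblas128.Band{
		Rows: 3, Cols: 3,
		KL: 1, KU: 1,
		Stride: 3,
		Data:   []complex128{0, 1, 2, 3, 4, 5, 6, 7, 0},
	}
	// Convert to column-major band storage: element (i, j) moves to
	// Data[j*Stride + (i-j+KU)].
	t := cblas128.BandCols{
		Rows: 3, Cols: 3,
		KL: 1, KU: 1,
		Stride: 3,
		Data:   make([]complex128, 9),
	}
	t.From(a)
	fmt.Println(t.Data) // [(0+0i) (1+0i) (3+0i) (2+0i) (4+0i) (6+0i) (5+0i) (7+0i) (0+0i)]
}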
+func (t TriangularBand) From(a TriangularBandCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go new file mode 100644 index 00000000..51c3a577 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go @@ -0,0 +1,155 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import "gonum.org/v1/gonum/blas" + +// HermitianCols represents a matrix using the conventional column-major storage scheme. +type HermitianCols Hermitian + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t HermitianCols) From(a Hermitian) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t Hermitian) From(a HermitianCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// HermitianBandCols represents an Hermitian matrix using the band column-major storage scheme. +type HermitianBandCols HermitianBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
+func (t HermitianBandCols) From(a HermitianBand) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t HermitianBand) From(a HermitianBandCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go new file mode 100644 index 00000000..f1bf40c2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go @@ -0,0 +1,155 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import "gonum.org/v1/gonum/blas" + +// SymmetricCols represents a matrix using the conventional column-major storage scheme. +type SymmetricCols Symmetric + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t SymmetricCols) From(a Symmetric) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. 
+func (t Symmetric) From(a SymmetricCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// SymmetricBandCols represents a symmetric matrix using the band column-major storage scheme. +type SymmetricBandCols SymmetricBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t SymmetricBandCols) From(a SymmetricBand) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t SymmetricBand) From(a SymmetricBandCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go b/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go new file mode 100644 index 00000000..09719b19 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cblas128 provides a simple interface to the complex128 BLAS API. +package cblas128 // import "gonum.org/v1/gonum/blas/cblas128" diff --git a/vendor/gonum.org/v1/gonum/blas/conversions.bash b/vendor/gonum.org/v1/gonum/blas/conversions.bash new file mode 100644 index 00000000..d1c0ef0d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/conversions.bash @@ -0,0 +1,159 @@ +#!/usr/bin/env bash + +# Copyright ©2017 The Gonum Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Generate code for blas32. 
+echo Generating blas32/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ +\ +>> blas32/conv.go + +echo Generating blas32/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +\ +>> blas32/conv_test.go + +echo Generating blas32/conv_symmetric.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv_symmetric.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ +\ +>> blas32/conv_symmetric.go + +echo Generating blas32/conv_symmetric_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv_symmetric_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +\ +>> blas32/conv_symmetric_test.go + + +# Generate code for cblas128. +echo Generating cblas128/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ +\ +>> cblas128/conv.go + +echo Generating cblas128/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_test.go + +echo Generating cblas128/conv_symmetric.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_symmetric.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ +\ +>> cblas128/conv_symmetric.go + +echo Generating cblas128/conv_symmetric_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_symmetric_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_symmetric_test.go + +echo Generating cblas128/conv_hermitian.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_hermitian.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ +\ +>> cblas128/conv_hermitian.go + +echo Generating cblas128/conv_hermitian_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_hermitian_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_hermitian_test.go + + +# Generate code for cblas64. 
+echo Generating cblas64/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ +\ +>> cblas64/conv.go + +echo Generating cblas64/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/cmplx64"_' \ +\ +>> cblas64/conv_test.go + +echo Generating cblas64/conv_hermitian.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv_hermitian.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ +\ +>> cblas64/conv_hermitian.go + +echo Generating cblas64/conv_hermitian_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv_hermitian_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/cmplx64"_' \ +\ +>> cblas64/conv_hermitian_test.go diff --git a/vendor/gonum.org/v1/gonum/blas/doc.go b/vendor/gonum.org/v1/gonum/blas/doc.go new file mode 100644 index 00000000..ea4b16c9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/doc.go @@ -0,0 +1,108 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package blas provides interfaces for the BLAS linear algebra standard. + +All methods must perform appropriate parameter checking and panic if +provided parameters that do not conform to the requirements specified +by the BLAS standard. + +Quick Reference Guide to the BLAS from http://www.netlib.org/lapack/lug/node145.html + +This version is modified to remove the "order" option. All matrix operations are +on row-order matrices. 
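Before the quick reference, it may help to spell out what row-order means for the slices these interfaces accept; a tiny, self-contained sketch (an illustration, not taken from the BLAS text) of the dense row-major layout.

package main

import "fmt"

func main() {
	// The 2×3 matrix
	//   [ 1 2 3 ]
	//   [ 4 5 6 ]
	// is passed as a flat row-major slice with leading dimension lda >= 3;
	// element (i, j) is found at a[i*lda+j].
	const lda = 3
	a := []float64{
		1, 2, 3, // row 0
		4, 5, 6, // row 1
	}
	fmt.Println(a[1*lda+2]) // element (1, 2): 6
}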
+ +Level 1 BLAS + + dim scalar vector vector scalars 5-element prefixes + struct + + _rotg ( a, b ) S, D + _rotmg( d1, d2, a, b ) S, D + _rot ( n, x, incX, y, incY, c, s ) S, D + _rotm ( n, x, incX, y, incY, param ) S, D + _swap ( n, x, incX, y, incY ) S, D, C, Z + _scal ( n, alpha, x, incX ) S, D, C, Z, Cs, Zd + _copy ( n, x, incX, y, incY ) S, D, C, Z + _axpy ( n, alpha, x, incX, y, incY ) S, D, C, Z + _dot ( n, x, incX, y, incY ) S, D, Ds + _dotu ( n, x, incX, y, incY ) C, Z + _dotc ( n, x, incX, y, incY ) C, Z + __dot ( n, alpha, x, incX, y, incY ) Sds + _nrm2 ( n, x, incX ) S, D, Sc, Dz + _asum ( n, x, incX ) S, D, Sc, Dz + I_amax( n, x, incX ) s, d, c, z + +Level 2 BLAS + + options dim b-width scalar matrix vector scalar vector prefixes + + _gemv ( trans, m, n, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z + _gbmv ( trans, m, n, kL, kU, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z + _hemv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) C, Z + _hbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) C, Z + _hpmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) C, Z + _symv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) S, D + _sbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) S, D + _spmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) S, D + _trmv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z + _tbmv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z + _tpmv ( uplo, trans, diag, n, ap, x, incX ) S, D, C, Z + _trsv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z + _tbsv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z + _tpsv ( uplo, trans, diag, n, ap, x, incX ) S, D, C, Z + + options dim scalar vector vector matrix prefixes + + _ger ( m, n, alpha, x, incX, y, incY, a, lda ) S, D + _geru ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z + _gerc ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z + _her ( uplo, n, alpha, x, incX, a, lda ) C, Z + _hpr ( uplo, n, alpha, x, incX, ap ) C, Z + _her2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) C, Z + _hpr2 ( uplo, n, alpha, x, incX, y, incY, ap ) C, Z + _syr ( uplo, n, alpha, x, incX, a, lda ) S, D + _spr ( uplo, n, alpha, x, incX, ap ) S, D + _syr2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) S, D + _spr2 ( uplo, n, alpha, x, incX, y, incY, ap ) S, D + +Level 3 BLAS + + options dim scalar matrix matrix scalar matrix prefixes + + _gemm ( transA, transB, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _symm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _hemm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z + _syrk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) S, D, C, Z + _herk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) C, Z + _syr2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _her2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z + _trmm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z + _trsm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z + +Meaning of prefixes + + S - float32 C - complex64 + D - float64 Z - complex128 + +Matrix types + + GE - GEneral GB - General Band + SY - SYmmetric SB - Symmetric Band SP - Symmetric Packed + HE - HErmitian HB - Hermitian Band HP - Hermitian Packed + TR - TRiangular TB - Triangular Band TP - Triangular Packed + +Options + + trans = NoTrans, Trans, ConjTrans + uplo = Upper, Lower + diag = Nonunit, Unit + side = Left, Right (A or op(A) on the left, or A or op(A) on the right) + +For real matrices, Trans and ConjTrans 
have the same meaning. +For Hermitian matrices, trans = Trans is not allowed. +For complex symmetric matrices, trans = ConjTrans is not allowed. +*/ +package blas // import "gonum.org/v1/gonum/blas" diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go b/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go new file mode 100644 index 00000000..ec3fcc61 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go @@ -0,0 +1,314 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "runtime" + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Dgemm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C +// C = alpha * A^T * B + beta * C +// C = alpha * A * B^T + beta * C +// C = alpha * A^T * B^T + beta * C +// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is +// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or +// B are transposed. +func (Implementation) Dgemm(tA, tB blas.Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + aTrans := tA == blas.Trans || tA == blas.ConjTrans + if aTrans { + if lda < max(1, m) { + panic(badLdA) + } + } else { + if lda < max(1, k) { + panic(badLdA) + } + } + bTrans := tB == blas.Trans || tB == blas.ConjTrans + if bTrans { + if ldb < max(1, k) { + panic(badLdB) + } + } else { + if ldb < max(1, n) { + panic(badLdB) + } + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if aTrans { + if len(a) < (k-1)*lda+m { + panic(shortA) + } + } else { + if len(a) < (m-1)*lda+k { + panic(shortA) + } + } + if bTrans { + if len(b) < (n-1)*ldb+k { + panic(shortB) + } + } else { + if len(b) < (k-1)*ldb+n { + panic(shortB) + } + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + // scale c + if beta != 1 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + } + } + + dgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) +} + +func dgemmParallel(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // dgemmParallel computes a parallel matrix multiplication by partitioning + // a and b into sub-blocks, and updating c with the multiplication of the sub-block + // In all cases, + // A = [ A_11 A_12 ... A_1j + // A_21 A_22 ... A_2j + // ... + // A_i1 A_i2 ... A_ij] + // + // and same for B. All of the submatrix sizes are blockSize×blockSize except + // at the edges. + // + // In all cases, there is one dimension for each matrix along which + // C must be updated sequentially. 
+ // Cij = \sum_k Aik Bki, (A * B) + // Cij = \sum_k Aki Bkj, (A^T * B) + // Cij = \sum_k Aik Bjk, (A * B^T) + // Cij = \sum_k Aki Bjk, (A^T * B^T) + // + // This code computes one {i, j} block sequentially along the k dimension, + // and computes all of the {i, j} blocks concurrently. This + // partitioning allows Cij to be updated in-place without race-conditions. + // Instead of launching a goroutine for each possible concurrent computation, + // a number of worker goroutines are created and channels are used to pass + // available and completed cases. + // + // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix + // multiplies, though this code does not copy matrices to attempt to eliminate + // cache misses. + + maxKLen := k + parBlocks := blocks(m, blockSize) * blocks(n, blockSize) + if parBlocks < minParBlock { + // The matrix multiplication is small in the dimensions where it can be + // computed concurrently. Just do it in serial. + dgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + } + + nWorkers := runtime.GOMAXPROCS(0) + if parBlocks < nWorkers { + nWorkers = parBlocks + } + // There is a tradeoff between the workers having to wait for work + // and a large buffer making operations slow. + buf := buffMul * nWorkers + if buf > parBlocks { + buf = parBlocks + } + + sendChan := make(chan subMul, buf) + + // Launch workers. A worker receives an {i, j} submatrix of c, and computes + // A_ik B_ki (or the transposed version) storing the result in c_ij. When the + // channel is finally closed, it signals to the waitgroup that it has finished + // computing. + var wg sync.WaitGroup + for i := 0; i < nWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for sub := range sendChan { + i := sub.i + j := sub.j + leni := blockSize + if i+leni > m { + leni = m - i + } + lenj := blockSize + if j+lenj > n { + lenj = n - j + } + + cSub := sliceView64(c, ldc, i, j, leni, lenj) + + // Compute A_ik B_kj for all k + for k := 0; k < maxKLen; k += blockSize { + lenk := blockSize + if k+lenk > maxKLen { + lenk = maxKLen - k + } + var aSub, bSub []float64 + if aTrans { + aSub = sliceView64(a, lda, k, i, lenk, leni) + } else { + aSub = sliceView64(a, lda, i, k, leni, lenk) + } + if bTrans { + bSub = sliceView64(b, ldb, j, k, lenj, lenk) + } else { + bSub = sliceView64(b, ldb, k, j, lenk, lenj) + } + dgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) + } + } + }() + } + + // Send out all of the {i, j} subblocks for computation. 
+ for i := 0; i < m; i += blockSize { + for j := 0; j < n; j += blockSize { + sendChan <- subMul{ + i: i, + j: j, + } + } + } + close(sendChan) + wg.Wait() +} + +// dgemmSerial is serial matrix multiply +func dgemmSerial(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + switch { + case !aTrans && !bTrans: + dgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && !bTrans: + dgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case !aTrans && bTrans: + dgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && bTrans: + dgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + default: + panic("unreachable") + } +} + +// dgemmSerial where neither a nor b are transposed +func dgemmSerialNotNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for l, v := range a[i*lda : i*lda+k] { + tmp := alpha * v + if tmp != 0 { + f64.AxpyUnitary(tmp, b[l*ldb:l*ldb+n], ctmp) + } + } + } +} + +// dgemmSerial where neither a is transposed and b is not +func dgemmSerialTransNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + btmp := b[l*ldb : l*ldb+n] + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyUnitary(tmp, btmp, ctmp) + } + } + } +} + +// dgemmSerial where neither a is not transposed and b is +func dgemmSerialNotTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + atmp := a[i*lda : i*lda+k] + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] += alpha * f64.DotUnitary(atmp, b[j*ldb:j*ldb+k]) + } + } +} + +// dgemmSerial where both are transposed +func dgemmSerialTransTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) + } + } + } +} + +func sliceView64(a []float64, lda, i, j, r, c int) []float64 { + return a[i*lda+j : (i+r-1)*lda+j+c] +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/doc.go b/vendor/gonum.org/v1/gonum/blas/gonum/doc.go new file mode 100644 index 00000000..3f4b6c1d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/doc.go @@ -0,0 +1,88 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Ensure changes made to blas/native are reflected in blas/cgo where relevant. + +/* +Package gonum is a Go implementation of the BLAS API. 
This implementation +panics when the input arguments are invalid as per the standard, for example +if a vector increment is zero. Note that the treatment of NaN values +is not specified, and differs among the BLAS implementations. +gonum.org/v1/gonum/blas/blas64 provides helpful wrapper functions to the BLAS +interface. The rest of this text describes the layout of the data for the input types. + +Note that in the function documentation, x[i] refers to the i^th element +of the vector, which will be different from the i^th element of the slice if +incX != 1. + +See http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html +for more license information. + +Vector arguments are effectively strided slices. They have two input arguments, +a number of elements, n, and an increment, incX. The increment specifies the +distance between elements of the vector. The actual Go slice may be longer +than necessary. +The increment may be positive or negative, except in functions with only +a single vector argument where the increment may only be positive. If the increment +is negative, s[0] is the last element in the slice. Note that this is not the same +as counting backward from the end of the slice, as len(s) may be longer than +necessary. So, for example, if n = 5 and incX = 3, the elements of s are + [0 * * 1 * * 2 * * 3 * * 4 * * * ...] +where ∗ elements are never accessed. If incX = -3, the same elements are +accessed, just in reverse order (4, 3, 2, 1, 0). + +Dense matrices are specified by a number of rows, a number of columns, and a stride. +The stride specifies the number of entries in the slice between the first element +of successive rows. The stride must be at least as large as the number of columns +but may be longer. + [a00 ... a0n a0* ... a1stride-1 a21 ... amn am* ... amstride-1] +Thus, dense[i*ld + j] refers to the {i, j}th element of the matrix. + +Symmetric and triangular matrices (non-packed) are stored identically to Dense, +except that only elements in one triangle of the matrix are accessed. + +Packed symmetric and packed triangular matrices are laid out with the entries +condensed such that all of the unreferenced elements are removed. So, the upper triangular +matrix + [ + 1 2 3 + 0 4 5 + 0 0 6 + ] +and the lower-triangular matrix + [ + 1 0 0 + 2 3 0 + 4 5 6 + ] +will both be compacted as [1 2 3 4 5 6]. The (i, j) element of the original +dense matrix can be found at element i*n - (i-1)*i/2 + j for upper triangular, +and at element i * (i+1) /2 + j for lower triangular. + +Banded matrices are laid out in a compact format, constructed by removing the +zeros in the rows and aligning the diagonals. For example, the matrix + [ + 1 2 3 0 0 0 + 4 5 6 7 0 0 + 0 8 9 10 11 0 + 0 0 12 13 14 15 + 0 0 0 16 17 18 + 0 0 0 0 19 20 + ] + +implicitly becomes (∗ entries are never accessed) + [ + * 1 2 3 + 4 5 6 7 + 8 9 10 11 + 12 13 14 15 + 16 17 18 * + 19 20 * * + ] +which is given to the BLAS routine as [∗ 1 2 3 4 ...]. + +See http://www.crest.iu.edu/research/mtl/reference/html/banded.html +for more information +*/ +package gonum // import "gonum.org/v1/gonum/blas/gonum" diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/errors.go b/vendor/gonum.org/v1/gonum/blas/gonum/errors.go new file mode 100644 index 00000000..e98575d0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/errors.go @@ -0,0 +1,35 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +// Panic strings used during parameter checks. +// This list is duplicated in netlib/blas/netlib. Keep in sync. +const ( + zeroIncX = "blas: zero x index increment" + zeroIncY = "blas: zero y index increment" + + mLT0 = "blas: m < 0" + nLT0 = "blas: n < 0" + kLT0 = "blas: k < 0" + kLLT0 = "blas: kL < 0" + kULT0 = "blas: kU < 0" + + badUplo = "blas: illegal triangle" + badTranspose = "blas: illegal transpose" + badDiag = "blas: illegal diagonal" + badSide = "blas: illegal side" + badFlag = "blas: illegal rotm flag" + + badLdA = "blas: bad leading dimension of A" + badLdB = "blas: bad leading dimension of B" + badLdC = "blas: bad leading dimension of C" + + shortX = "blas: insufficient length of x" + shortY = "blas: insufficient length of y" + shortAP = "blas: insufficient length of ap" + shortA = "blas: insufficient length of a" + shortB = "blas: insufficient length of b" + shortC = "blas: insufficient length of c" +) diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go b/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go new file mode 100644 index 00000000..9b9a1beb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go @@ -0,0 +1,190 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// TODO(Kunde21): Merge these methods back into level2double/level2single when Sgemv assembly kernels are merged into f32. + +// Dgemv computes +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func (Implementation) Dgemv(tA blas.Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + + // Quick return if possible + if m == 0 || n == 0 { + return + } + + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + // First form y = beta * y + if incY > 0 { + Implementation{}.Dscal(lenY, beta, y, incY) + } else { + Implementation{}.Dscal(lenY, beta, y, -incY) + } + return + } + + // Form y = alpha * A * x + y + if tA == blas.NoTrans { + f64.GemvN(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) + return + } + // Cases where a is transposed. + f64.GemvT(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) +} + +// Sgemv computes +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
+// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sgemv(tA blas.Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // First form y = beta * y + if incY > 0 { + Implementation{}.Sscal(lenY, beta, y, incY) + } else { + Implementation{}.Sscal(lenY, beta, y, -incY) + } + + if alpha == 0 { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = alpha * A * x + y + if tA == blas.NoTrans { + if incX == 1 && incY == 1 { + for i := 0; i < m; i++ { + y[i] += alpha * f32.DotUnitary(a[lda*i:lda*i+n], x[:n]) + } + return + } + iy := ky + for i := 0; i < m; i++ { + y[iy] += alpha * f32.DotInc(x, a[lda*i:lda*i+n], uintptr(n), uintptr(incX), 1, uintptr(kx), 0) + iy += incY + } + return + } + // Cases where a is transposed. + if incX == 1 && incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[i] + if tmp != 0 { + f32.AxpyUnitaryTo(y, tmp, a[lda*i:lda*i+n], y[:n]) + } + } + return + } + ix := kx + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + if tmp != 0 { + f32.AxpyInc(tmp, a[lda*i:lda*i+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + } + ix += incX + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go new file mode 100644 index 00000000..8ab8d43e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go @@ -0,0 +1,58 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate ./single_precision.bash + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/internal/math32" +) + +type Implementation struct{} + +// [SD]gemm behavior constants. These are kept here to keep them out of the +// way during single precision code genration. +const ( + blockSize = 64 // b x b matrix + minParBlock = 4 // minimum number of blocks needed to go parallel + buffMul = 4 // how big is the buffer relative to the number of workers +) + +// subMul is a common type shared by [SD]gemm. +type subMul struct { + i, j int // index of block +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a > b { + return b + } + return a +} + +// blocks returns the number of divisions of the dimension length with the given +// block size. +func blocks(dim, bsize int) int { + return (dim + bsize - 1) / bsize +} + +// dcabs1 returns |real(z)|+|imag(z)|. +func dcabs1(z complex128) float64 { + return math.Abs(real(z)) + math.Abs(imag(z)) +} + +// scabs1 returns |real(z)|+|imag(z)|. 
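As a usage sketch tying the Implementation type to the row-major convention described in the package documentation, a direct call to the Dgemv method defined above; the expected output in the comment is computed by hand.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	// y = 1*A*x + 0*y for the row-major 2×3 matrix A = [[1, 2, 3], [4, 5, 6]].
	a := []float64{1, 2, 3, 4, 5, 6}
	x := []float64{1, 0, -1}
	y := make([]float64, 2)
	impl.Dgemv(blas.NoTrans, 2, 3, 1, a, 3, x, 1, 0, y, 1)
	fmt.Println(y) // [-2 -2]
}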
+func scabs1(z complex64) float32 { + return math32.Abs(real(z)) + math32.Abs(imag(z)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go new file mode 100644 index 00000000..e37bf44d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go @@ -0,0 +1,445 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c128" +) + +var _ blas.Complex128Level1 = Implementation{} + +// Dzasum returns the sum of the absolute values of the elements of x +// \sum_i |Re(x[i])| + |Im(x[i])| +// Dzasum returns 0 if incX is negative. +func (Implementation) Dzasum(n int, x []complex128, incX int) float64 { + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + var sum float64 + if incX == 1 { + if len(x) < n { + panic(shortX) + } + for _, v := range x[:n] { + sum += dcabs1(v) + } + return sum + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + for i := 0; i < n; i++ { + v := x[i*incX] + sum += dcabs1(v) + } + return sum +} + +// Dznrm2 computes the Euclidean norm of the complex vector x, +// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// This function returns 0 if incX is negative. +func (Implementation) Dznrm2(n int, x []complex128, incX int) float64 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if n < 1 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + var ( + scale float64 + ssq float64 = 1 + ) + if incX == 1 { + for _, v := range x[:n] { + re, im := math.Abs(real(v)), math.Abs(imag(v)) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) + } + for ix := 0; ix < n*incX; ix += incX { + re, im := math.Abs(real(x[ix])), math.Abs(imag(x[ix])) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) +} + +// Izamax returns the index of the first element of x having largest |Re(·)|+|Im(·)|. +// Izamax returns -1 if n is 0 or incX is negative. +func (Implementation) Izamax(n int, x []complex128, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + // Return invalid index. + return -1 + } + if n < 1 { + if n == 0 { + // Return invalid index. 
+ return -1 + } + panic(nLT0) + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + idx := 0 + max := dcabs1(x[0]) + if incX == 1 { + for i, v := range x[1:n] { + absV := dcabs1(v) + if absV > max { + max = absV + idx = i + 1 + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + absV := dcabs1(x[ix]) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Zaxpy adds alpha times x to y: +// y[i] += alpha * x[i] for all i +func (Implementation) Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + c128.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (1 - n) * incX + } + if incY < 0 { + iy = (1 - n) * incY + } + c128.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zcopy copies the vector x to vector y. +func (Implementation) Zcopy(n int, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Zdotc computes the dot product +// x^H · y +// of two complex vectors x and y. +func (Implementation) Zdotc(n int, x []complex128, incX int, y []complex128, incY int) complex128 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c128.DotcUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c128.DotcInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zdotu computes the dot product +// x^T · y +// of two complex vectors x and y. 
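+//
+// An illustrative sketch (not part of the upstream source): for
+//
+//	x := []complex128{1i}
+//	y := []complex128{1i}
+//
+// Implementation{}.Zdotu(1, x, 1, y, 1) returns 1i*1i = -1, while Zdotc,
+// which conjugates x first, returns conj(1i)*1i = 1.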
+func (Implementation) Zdotu(n int, x []complex128, incX int, y []complex128, incY int) complex128 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c128.DotuUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c128.DotuInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zdscal scales the vector x by a real scalar alpha. +// Zdscal has no effect if incX < 0. +func (Implementation) Zdscal(n int, alpha float64, x []complex128, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + x = x[:n] + for i, v := range x { + x[i] = complex(alpha*real(v), alpha*imag(v)) + } + return + } + for ix := 0; ix < n*incX; ix += incX { + v := x[ix] + x[ix] = complex(alpha*real(v), alpha*imag(v)) + } +} + +// Zscal scales the vector x by a complex scalar alpha. +// Zscal has no effect if incX < 0. +func (Implementation) Zscal(n int, alpha complex128, x []complex128, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + c128.ScalUnitary(alpha, x[:n]) + return + } + c128.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} + +// Zswap exchanges the elements of two complex vectors x and y. +func (Implementation) Zswap(n int, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go new file mode 100644 index 00000000..ba192ea5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go @@ -0,0 +1,467 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + math "gonum.org/v1/gonum/internal/math32" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c64" +) + +var _ blas.Complex64Level1 = Implementation{} + +// Scasum returns the sum of the absolute values of the elements of x +// \sum_i |Re(x[i])| + |Im(x[i])| +// Scasum returns 0 if incX is negative. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Scasum(n int, x []complex64, incX int) float32 { + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + var sum float32 + if incX == 1 { + if len(x) < n { + panic(shortX) + } + for _, v := range x[:n] { + sum += scabs1(v) + } + return sum + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + for i := 0; i < n; i++ { + v := x[i*incX] + sum += scabs1(v) + } + return sum +} + +// Scnrm2 computes the Euclidean norm of the complex vector x, +// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// This function returns 0 if incX is negative. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Scnrm2(n int, x []complex64, incX int) float32 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if n < 1 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + var ( + scale float32 + ssq float32 = 1 + ) + if incX == 1 { + for _, v := range x[:n] { + re, im := math.Abs(real(v)), math.Abs(imag(v)) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) + } + for ix := 0; ix < n*incX; ix += incX { + re, im := math.Abs(real(x[ix])), math.Abs(imag(x[ix])) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) +} + +// Icamax returns the index of the first element of x having largest |Re(·)|+|Im(·)|. +// Icamax returns -1 if n is 0 or incX is negative. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Icamax(n int, x []complex64, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + // Return invalid index. + return -1 + } + if n < 1 { + if n == 0 { + // Return invalid index. + return -1 + } + panic(nLT0) + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + idx := 0 + max := scabs1(x[0]) + if incX == 1 { + for i, v := range x[1:n] { + absV := scabs1(v) + if absV > max { + max = absV + idx = i + 1 + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + absV := scabs1(x[ix]) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Caxpy adds alpha times x to y: +// y[i] += alpha * x[i] for all i +// +// Complex64 implementations are autogenerated and not directly tested. 
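+//
+// A small usage sketch, added for illustration only: with unit increments,
+//
+//	x := []complex64{1, 1i}
+//	y := []complex64{0, 0}
+//	Implementation{}.Caxpy(2, 1i, x, 1, y, 1)
+//	// y is now [(0+1i), (-1+0i)].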
+func (Implementation) Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + c64.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (1 - n) * incX + } + if incY < 0 { + iy = (1 - n) * incY + } + c64.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Ccopy copies the vector x to vector y. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ccopy(n int, x []complex64, incX int, y []complex64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Cdotc computes the dot product +// x^H · y +// of two complex vectors x and y. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cdotc(n int, x []complex64, incX int, y []complex64, incY int) complex64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c64.DotcUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c64.DotcInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Cdotu computes the dot product +// x^T · y +// of two complex vectors x and y. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cdotu(n int, x []complex64, incX int, y []complex64, incY int) complex64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c64.DotuUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c64.DotuInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Csscal scales the vector x by a real scalar alpha. +// Csscal has no effect if incX < 0. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Csscal(n int, alpha float32, x []complex64, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + x = x[:n] + for i, v := range x { + x[i] = complex(alpha*real(v), alpha*imag(v)) + } + return + } + for ix := 0; ix < n*incX; ix += incX { + v := x[ix] + x[ix] = complex(alpha*real(v), alpha*imag(v)) + } +} + +// Cscal scales the vector x by a complex scalar alpha. +// Cscal has no effect if incX < 0. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cscal(n int, alpha complex64, x []complex64, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + c64.ScalUnitary(alpha, x[:n]) + return + } + c64.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} + +// Cswap exchanges the elements of two complex vectors x and y. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cswap(n int, x []complex64, incX int, y []complex64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go new file mode 100644 index 00000000..ee82083a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go @@ -0,0 +1,644 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + math "gonum.org/v1/gonum/internal/math32" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level1 = Implementation{} + +// Snrm2 computes the Euclidean norm of a vector, +// sqrt(\sum_i x[i] * x[i]). +// This function returns 0 if incX is negative. +// +// Float32 implementations are autogenerated and not directly tested. 
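+//
+// A usage sketch added for illustration (not part of the upstream source):
+//
+//	x := []float32{3, 4}
+//	nrm := Implementation{}.Snrm2(2, x, 1)
+//	// nrm == 5
+//
+// The scaled accumulation below keeps intermediate squares from overflowing
+// even when the final norm itself is representable in float32.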
+func (Implementation) Snrm2(n int, x []float32, incX int) float32 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return math.Abs(x[0]) + } + if n == 0 { + return 0 + } + panic(nLT0) + } + var ( + scale float32 = 0 + sumSquares float32 = 1 + ) + if incX == 1 { + x = x[:n] + for _, v := range x { + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) + } + for ix := 0; ix < n*incX; ix += incX { + val := x[ix] + if val == 0 { + continue + } + absxi := math.Abs(val) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// Sasum computes the sum of the absolute values of the elements of x. +// \sum_i |x[i]| +// Sasum returns 0 if incX is negative. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sasum(n int, x []float32, incX int) float32 { + var sum float32 + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if incX == 1 { + x = x[:n] + for _, v := range x { + sum += math.Abs(v) + } + return sum + } + for i := 0; i < n; i++ { + sum += math.Abs(x[i*incX]) + } + return sum +} + +// Isamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Isamax returns -1 if n == 0. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Isamax(n int, x []float32, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return -1 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return 0 + } + if n == 0 { + return -1 // Netlib returns invalid index when n == 0. + } + panic(nLT0) + } + idx := 0 + max := math.Abs(x[0]) + if incX == 1 { + for i, v := range x[:n] { + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + v := x[ix] + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Sswap exchanges the elements of two vectors. +// x[i], y[i] = y[i], x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. 
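+//
+// An illustrative stride example (not part of the upstream source): with
+// incX = 2 only every other element of x takes part in the swap,
+//
+//	x := []float32{1, 2, 3, 4}
+//	y := []float32{9, 8}
+//	Implementation{}.Sswap(2, x, 2, y, 1)
+//	// x is now [9, 2, 8, 4] and y is [1, 3].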
+func (Implementation) Sswap(n int, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} + +// Scopy copies the elements of x into the elements of y. +// y[i] = x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Scopy(n int, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Saxpy adds alpha times x to y +// y[i] += alpha * x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + f32.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + f32.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Srotg computes the plane rotation +// _ _ _ _ _ _ +// | c s | | a | | r | +// | -s c | * | b | = | 0 | +// ‾ ‾ ‾ ‾ ‾ ‾ +// where +// r = ±√(a^2 + b^2) +// c = a/r, the cosine of the plane rotation +// s = b/r, the sine of the plane rotation +// +// NOTE: There is a discrepancy between the reference implementation and the BLAS +// technical manual regarding the sign for r when a or b are zero. +// Srotg agrees with the definition in the manual and other +// common BLAS implementations. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srotg(a, b float32) (c, s, r, z float32) { + if b == 0 && a == 0 { + return 1, 0, a, 0 + } + absA := math.Abs(a) + absB := math.Abs(b) + aGTb := absA > absB + r = math.Hypot(a, b) + if aGTb { + r = math.Copysign(r, a) + } else { + r = math.Copysign(r, b) + } + c = a / r + s = b / r + if aGTb { + z = s + } else if c != 0 { // r == 0 case handled above + z = 1 / c + } else { + z = 1 + } + return +} + +// Srotmg computes the modified Givens rotation. 
See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srotmg(d1, d2, x1, y1 float32) (p blas.SrotmParams, rd1, rd2, rx1 float32) { + // The implementation of Drotmg used here is taken from Hopkins 1997 + // Appendix A: https://doi.org/10.1145/289251.289253 + // with the exception of the gam constants below. + + const ( + gam = 4096.0 + gamsq = gam * gam + rgamsq = 1.0 / gamsq + ) + + if d1 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + if d2 == 0 || y1 == 0 { + p.Flag = blas.Identity + return p, d1, d2, x1 + } + + var h11, h12, h21, h22 float32 + if (d1 == 0 || x1 == 0) && d2 > 0 { + p.Flag = blas.Diagonal + h12 = 1 + h21 = -1 + x1 = y1 + d1, d2 = d2, d1 + } else { + p2 := d2 * y1 + p1 := d1 * x1 + q2 := p2 * y1 + q1 := p1 * x1 + if math.Abs(q1) > math.Abs(q2) { + p.Flag = blas.OffDiagonal + h11 = 1 + h22 = 1 + h21 = -y1 / x1 + h12 = p2 / p1 + u := 1 - h12*h21 + if u <= 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + d1 /= u + d2 /= u + x1 *= u + } else { + if q2 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + p.Flag = blas.Diagonal + h21 = -1 + h12 = 1 + h11 = p1 / p2 + h22 = x1 / y1 + u := 1 + h11*h22 + d1, d2 = d2/u, d1/u + x1 = y1 * u + } + } + + for d1 <= rgamsq && d1 != 0 { + p.Flag = blas.Rescaling + d1 = (d1 * gam) * gam + x1 /= gam + h11 /= gam + h12 /= gam + } + for d1 > gamsq { + p.Flag = blas.Rescaling + d1 = (d1 / gam) / gam + x1 *= gam + h11 *= gam + h12 *= gam + } + + for math.Abs(d2) <= rgamsq && d2 != 0 { + p.Flag = blas.Rescaling + d2 = (d2 * gam) * gam + h21 /= gam + h22 /= gam + } + for math.Abs(d2) > gamsq { + p.Flag = blas.Rescaling + d2 = (d2 / gam) / gam + h21 *= gam + h22 *= gam + } + + switch p.Flag { + case blas.Diagonal: + p.H = [4]float32{0: h11, 3: h22} + case blas.OffDiagonal: + p.H = [4]float32{1: h21, 2: h12} + case blas.Rescaling: + p.H = [4]float32{h11, h21, h12, h22} + default: + panic(badFlag) + } + + return p, d1, d2, x1 +} + +// Srot applies a plane transformation. +// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srot(n int, x []float32, incX int, y []float32, incY int, c float32, s float32) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = c*vx+s*vy, c*vy-s*vx + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx + ix += incX + iy += incY + } +} + +// Srotm applies the modified Givens rotation to the 2×n matrix. +// +// Float32 implementations are autogenerated and not directly tested. 
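+//
+// A typical call pattern, sketched here for illustration only: the parameter
+// block produced by Srotmg is passed straight to Srotm, which applies the
+// rotation to the vectors,
+//
+//	var p blas.SrotmParams
+//	p, d1, d2, x[0] = Implementation{}.Srotmg(d1, d2, x[0], y[0])
+//	Implementation{}.Srotm(n, x, 1, y, 1, p)
+//
+// where d1, d2 (float32), x, y ([]float32) and n are assumed to be defined
+// by the caller.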
+func (Implementation) Srotm(n int, x []float32, incX int, y []float32, incY int, p blas.SrotmParams) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + if p.Flag == blas.Identity { + return + } + + switch p.Flag { + case blas.Rescaling: + h11 := p.H[0] + h12 := p.H[2] + h21 := p.H[1] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 + ix += incX + iy += incY + } + case blas.OffDiagonal: + h12 := p.H[2] + h21 := p.H[1] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx+vy*h12, vx*h21+vy + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx+vy*h12, vx*h21+vy + ix += incX + iy += incY + } + case blas.Diagonal: + h11 := p.H[0] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy, -vx+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 + ix += incX + iy += incY + } + } +} + +// Sscal scales x by alpha. +// x[i] *= alpha +// Sscal has no effect if incX < 0. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sscal(n int, alpha float32, x []float32, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + f32.ScalUnitary(alpha, x[:n]) + return + } + f32.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go new file mode 100644 index 00000000..089e0d8f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Dsdot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. 
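+//
+// A usage sketch for illustration only: the inputs are float32 but the
+// accumulation and the result are float64,
+//
+//	x := []float32{1, 2}
+//	y := []float32{3, 4}
+//	d := Implementation{}.Dsdot(2, x, 1, y, 1)
+//	// d == 11.0 (a float64)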
+func (Implementation) Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return f32.DdotUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go new file mode 100644 index 00000000..41c3e792 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sdot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sdot(n int, x []float32, incX int, y []float32, incY int) float32 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return f32.DotUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return f32.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go new file mode 100644 index 00000000..69dd8aa1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sdsdot computes the dot product of the two vectors plus a constant +// alpha + \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. 
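+//
+// For illustration (not part of the upstream source): Sdsdot is Dsdot plus a
+// constant, rounded back to float32,
+//
+//	x := []float32{1, 2}
+//	y := []float32{3, 4}
+//	s := Implementation{}.Sdsdot(2, 0.5, x, 1, y, 1)
+//	// s == 11.5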
+func (Implementation) Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return alpha + float32(f32.DdotUnitary(x[:n], y[:n])) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return alpha + float32(f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy))) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go new file mode 100644 index 00000000..2e8ed543 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go @@ -0,0 +1,620 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level1 = Implementation{} + +// Dnrm2 computes the Euclidean norm of a vector, +// sqrt(\sum_i x[i] * x[i]). +// This function returns 0 if incX is negative. +func (Implementation) Dnrm2(n int, x []float64, incX int) float64 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return math.Abs(x[0]) + } + if n == 0 { + return 0 + } + panic(nLT0) + } + var ( + scale float64 = 0 + sumSquares float64 = 1 + ) + if incX == 1 { + x = x[:n] + for _, v := range x { + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) + } + for ix := 0; ix < n*incX; ix += incX { + val := x[ix] + if val == 0 { + continue + } + absxi := math.Abs(val) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// Dasum computes the sum of the absolute values of the elements of x. +// \sum_i |x[i]| +// Dasum returns 0 if incX is negative. +func (Implementation) Dasum(n int, x []float64, incX int) float64 { + var sum float64 + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if incX == 1 { + x = x[:n] + for _, v := range x { + sum += math.Abs(v) + } + return sum + } + for i := 0; i < n; i++ { + sum += math.Abs(x[i*incX]) + } + return sum +} + +// Idamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Idamax returns -1 if n == 0. 
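+//
+// A small example added for illustration: ties go to the earliest index,
+//
+//	x := []float64{-7, 2, 7}
+//	i := Implementation{}.Idamax(3, x, 1)
+//	// i == 0, because |-7| equals |7| and index 0 comes first.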
+func (Implementation) Idamax(n int, x []float64, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return -1 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return 0 + } + if n == 0 { + return -1 // Netlib returns invalid index when n == 0. + } + panic(nLT0) + } + idx := 0 + max := math.Abs(x[0]) + if incX == 1 { + for i, v := range x[:n] { + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + v := x[ix] + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Dswap exchanges the elements of two vectors. +// x[i], y[i] = y[i], x[i] for all i +func (Implementation) Dswap(n int, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} + +// Dcopy copies the elements of x into the elements of y. +// y[i] = x[i] for all i +func (Implementation) Dcopy(n int, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Daxpy adds alpha times x to y +// y[i] += alpha * x[i] for all i +func (Implementation) Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + f64.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + f64.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Drotg computes the plane rotation +// _ _ _ _ _ _ +// | c s | | a | | r | +// | -s c | * | b | = | 0 | +// ‾ ‾ ‾ ‾ ‾ ‾ +// where +// r = ±√(a^2 + b^2) +// c = a/r, the cosine of the plane rotation +// s = b/r, the sine of the plane rotation +// +// NOTE: There is a discrepancy between the reference implementation and the BLAS +// technical manual regarding the sign for r when a or b are zero. +// Drotg agrees with the definition in the manual and other +// common BLAS implementations. 
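+//
+// A worked example, added for illustration only:
+//
+//	c, s, r, _ := Implementation{}.Drotg(3, 4)
+//	// c, s, r are 0.6, 0.8 and 5; applying the rotation to (3, 4)
+//	// yields (5, 0), which is the point of a Givens rotation.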
+func (Implementation) Drotg(a, b float64) (c, s, r, z float64) { + if b == 0 && a == 0 { + return 1, 0, a, 0 + } + absA := math.Abs(a) + absB := math.Abs(b) + aGTb := absA > absB + r = math.Hypot(a, b) + if aGTb { + r = math.Copysign(r, a) + } else { + r = math.Copysign(r, b) + } + c = a / r + s = b / r + if aGTb { + z = s + } else if c != 0 { // r == 0 case handled above + z = 1 / c + } else { + z = 1 + } + return +} + +// Drotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +func (Implementation) Drotmg(d1, d2, x1, y1 float64) (p blas.DrotmParams, rd1, rd2, rx1 float64) { + // The implementation of Drotmg used here is taken from Hopkins 1997 + // Appendix A: https://doi.org/10.1145/289251.289253 + // with the exception of the gam constants below. + + const ( + gam = 4096.0 + gamsq = gam * gam + rgamsq = 1.0 / gamsq + ) + + if d1 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + if d2 == 0 || y1 == 0 { + p.Flag = blas.Identity + return p, d1, d2, x1 + } + + var h11, h12, h21, h22 float64 + if (d1 == 0 || x1 == 0) && d2 > 0 { + p.Flag = blas.Diagonal + h12 = 1 + h21 = -1 + x1 = y1 + d1, d2 = d2, d1 + } else { + p2 := d2 * y1 + p1 := d1 * x1 + q2 := p2 * y1 + q1 := p1 * x1 + if math.Abs(q1) > math.Abs(q2) { + p.Flag = blas.OffDiagonal + h11 = 1 + h22 = 1 + h21 = -y1 / x1 + h12 = p2 / p1 + u := 1 - h12*h21 + if u <= 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + d1 /= u + d2 /= u + x1 *= u + } else { + if q2 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + p.Flag = blas.Diagonal + h21 = -1 + h12 = 1 + h11 = p1 / p2 + h22 = x1 / y1 + u := 1 + h11*h22 + d1, d2 = d2/u, d1/u + x1 = y1 * u + } + } + + for d1 <= rgamsq && d1 != 0 { + p.Flag = blas.Rescaling + d1 = (d1 * gam) * gam + x1 /= gam + h11 /= gam + h12 /= gam + } + for d1 > gamsq { + p.Flag = blas.Rescaling + d1 = (d1 / gam) / gam + x1 *= gam + h11 *= gam + h12 *= gam + } + + for math.Abs(d2) <= rgamsq && d2 != 0 { + p.Flag = blas.Rescaling + d2 = (d2 * gam) * gam + h21 /= gam + h22 /= gam + } + for math.Abs(d2) > gamsq { + p.Flag = blas.Rescaling + d2 = (d2 / gam) / gam + h21 *= gam + h22 *= gam + } + + switch p.Flag { + case blas.Diagonal: + p.H = [4]float64{0: h11, 3: h22} + case blas.OffDiagonal: + p.H = [4]float64{1: h21, 2: h12} + case blas.Rescaling: + p.H = [4]float64{h11, h21, h12, h22} + default: + panic(badFlag) + } + + return p, d1, d2, x1 +} + +// Drot applies a plane transformation. +// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] +func (Implementation) Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = c*vx+s*vy, c*vy-s*vx + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx + ix += incX + iy += incY + } +} + +// Drotm applies the modified Givens rotation to the 2×n matrix. 
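+//
+// A hand-built parameter block, shown for illustration only (in practice p
+// normally comes from Drotmg): with Flag == blas.OffDiagonal the update is
+// x[i], y[i] = x[i] + h12*y[i], h21*x[i] + y[i],
+//
+//	p := blas.DrotmParams{Flag: blas.OffDiagonal, H: [4]float64{1: -3, 2: 2}}
+//	x := []float64{1}
+//	y := []float64{10}
+//	Implementation{}.Drotm(1, x, 1, y, 1, p)
+//	// x is now [21] and y is [7].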
+func (Implementation) Drotm(n int, x []float64, incX int, y []float64, incY int, p blas.DrotmParams) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + if p.Flag == blas.Identity { + return + } + + switch p.Flag { + case blas.Rescaling: + h11 := p.H[0] + h12 := p.H[2] + h21 := p.H[1] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 + ix += incX + iy += incY + } + case blas.OffDiagonal: + h12 := p.H[2] + h21 := p.H[1] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx+vy*h12, vx*h21+vy + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx+vy*h12, vx*h21+vy + ix += incX + iy += incY + } + case blas.Diagonal: + h11 := p.H[0] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy, -vx+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 + ix += incX + iy += incY + } + } +} + +// Dscal scales x by alpha. +// x[i] *= alpha +// Dscal has no effect if incX < 0. +func (Implementation) Dscal(n int, alpha float64, x []float64, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + f64.ScalUnitary(alpha, x[:n]) + return + } + f64.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go new file mode 100644 index 00000000..be87ba13 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go @@ -0,0 +1,49 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Ddot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +func (Implementation) Ddot(n int, x []float64, incX int, y []float64, incY int) float64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return f64.DotUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return f64.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go new file mode 100644 index 00000000..03ee328f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go @@ -0,0 +1,2906 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math/cmplx" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c128" +) + +var _ blas.Complex128Level2 = Implementation{} + +// Zgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix +// with kL sub-diagonals and kU super-diagonals. +func (Implementation) Zgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + var lenX, lenY int + if trans == blas.NoTrans { + lenX, lenY = n, m + } else { + lenX, lenY = m, n + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c128.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + nRow := min(m, n+kL) + nCol := kL + 1 + kU + switch trans { + case blas.NoTrans: + iy := ky + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) + xtmp := x[off : off+u-l] + var sum complex128 + for j, v := range aRow { + sum += xtmp[j] * v + } + y[iy] += alpha * sum + iy += incY + } + } else { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incX + jx := kx + var sum complex128 + for _, v := range aRow { + sum += x[off+jx] * v + jx += incX + } + y[iy] += alpha * sum + iy += incY + } + } + case blas.Trans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + ix += incX + } + } + case blas.ConjTrans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + ix += incX + } + } + } +} + +// Zgemv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. +func (Implementation) Zgemv(trans blas.Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ var lenX, lenY int + if trans == blas.NoTrans { + lenX = n + lenY = m + } else { + lenX = m + lenY = n + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c128.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + switch trans { + default: + // Form y = alpha*A*x + y. + iy := ky + if incX == 1 { + for i := 0; i < m; i++ { + y[iy] += alpha * c128.DotuUnitary(a[i*lda:i*lda+n], x[:n]) + iy += incY + } + return + } + for i := 0; i < m; i++ { + y[iy] += alpha * c128.DotuInc(a[i*lda:i*lda+n], x, uintptr(n), 1, uintptr(incX), 0, uintptr(kx)) + iy += incY + } + return + + case blas.Trans: + // Form y = alpha*A^T*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + c128.AxpyUnitary(alpha*x[ix], a[i*lda:i*lda+n], y[:n]) + ix += incX + } + return + } + for i := 0; i < m; i++ { + c128.AxpyInc(alpha*x[ix], a[i*lda:i*lda+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + ix += incX + } + return + + case blas.ConjTrans: + // Form y = alpha*A^H*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + for j := 0; j < n; j++ { + y[j] += tmp * cmplx.Conj(a[i*lda+j]) + } + ix += incX + } + return + } + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + jy := ky + for j := 0; j < n; j++ { + y[jy] += tmp * cmplx.Conj(a[i*lda+j]) + jy += incY + } + ix += incX + } + return + } +} + +// Zgerc performs the rank-one operation +// A += alpha * x * y^H +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +func (Implementation) Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + var kx, jy int + if incX < 0 { + kx = (1 - m) * incX + } + if incY < 0 { + jy = (1 - n) * incY + } + for j := 0; j < n; j++ { + if y[jy] != 0 { + tmp := alpha * cmplx.Conj(y[jy]) + c128.AxpyInc(tmp, x, a[j:], uintptr(m), uintptr(incX), uintptr(lda), uintptr(kx), 0) + } + jy += incY + } +} + +// Zgeru performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +func (Implementation) Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - m) * incX + } + if incY == 1 { + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c128.AxpyUnitary(tmp, y[:n], a[i*lda:i*lda+n]) + } + kx += incX + } + return + } + var jy int + if incY < 0 { + jy = (1 - n) * incY + } + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c128.AxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(jy), 0) + } + kx += incX + } +} + +// Zhbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian band matrix with k super-diagonals. The imaginary parts of +// the diagonal elements of A are ignored and assumed to be zero. +func (Implementation) Zhbmv(uplo blas.Uplo, n, k int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through a. + switch uplo { + case blas.Upper: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[i] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[i+j] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += sum + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[ix] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jx := incX + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + } + case blas.Lower: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[i] + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[ix] + jx := l * incX + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + ix += incX + iy += incY + } + } + } +} + +// Zhemv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix. The imaginary parts of the diagonal elements of A are +// ignored and assumed to be zero. +func (Implementation) Zhemv(uplo blas.Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through + // the triangular part of A. + + if uplo == blas.Upper { + // Form y when A is stored in upper triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := ix + jy := iy + for j := i + 1; j < n; j++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } + return + } + + // Form y when A is stored in lower triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := kx + jy := ky + for j := 0; j < i; j++ { + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } +} + +// Zher performs the Hermitian rank-one operation +// A += alpha * x * x^H +// where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n +// element vector. On entry, the imaginary parts of the diagonal elements of A +// are ignored and assumed to be zero, on return they will be set to zero. +func (Implementation) Zher(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
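+ // If alpha is zero A is left untouched. Otherwise A is updated one row at
+ // a time: element (i,j) of the stored triangle receives alpha*x[i]*conj(x[j]),
+ // and the diagonal entry is rebuilt from its real part so that its
+ // imaginary part is exactly zero on return.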
+ if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + jx := ix + incX + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } + return + } + + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + jx := kx + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } +} + +// Zher2 performs the Hermitian rank-two operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a scalar, x and y are n element vectors and A is an n×n +// Hermitian matrix. On entry, the imaginary parts of the diagonal elements are +// ignored and assumed to be zero. On return they will be set to zero. +func (Implementation) Zher2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
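+ // If alpha is zero A is left untouched. Otherwise element (i,j) of the
+ // stored triangle receives alpha*x[i]*conj(y[j]) + conj(alpha)*y[i]*conj(x[j]),
+ // and the diagonal entry is rebuilt from its real part.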
+ if alpha == 0 { + return + } + + var kx, ky int + var ix, iy int + if incX != 1 || incY != 1 { + if incX < 0 { + kx = (1 - n) * incX + } + if incY < 0 { + ky = (1 - n) * incY + } + ix = kx + iy = ky + } + if uplo == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } + return + } + + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } +} + +// Zhpmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix in packed form. The imaginary parts of the diagonal +// elements of A are ignored and assumed to be zero. +func (Implementation) Zhpmv(uplo blas.Uplo, n int, alpha complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] *= beta + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form y when ap contains the upper triangle. + // Here, kk points to the current diagonal element in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + y[i] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex128 + k := kk + 1 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + y[i] += alpha * tmp2 + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + y[iy] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex128 + jx := ix + jy := iy + for k := kk + 1; k < kk+n-i; k++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + } + y[iy] += alpha * tmp2 + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form y when ap contains the lower triangle. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + k := kk + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + aii := complex(real(ap[kk+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(ap[kk+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Zhpr performs the Hermitian rank-1 operation +// A += alpha * x * x^H +// where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix +// in packed form. On entry, the imaginary parts of the diagonal elements are +// assumed to be zero, and on return they are set to zero. +func (Implementation) Zhpr(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, ap []complex128) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. 
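+ // Row i of the packed upper triangle is the slice ap[kk:kk+n-i], with the
+ // diagonal at ap[kk]; after the row is updated, kk advances by n-i to the
+ // next diagonal element.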
+ if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + a := ap[kk+1 : kk+n-i] + x := x[i+1 : n] + for j, v := range x { + a[j] += tmp * cmplx.Conj(v) + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + jx := ix + incX + a := ap[kk+1 : kk+n-i] + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + for j, v := range x[:i] { + a[j] += tmp * cmplx.Conj(v) + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + jx := kx + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + kk += i + 1 + } + } +} + +// Zhpr2 performs the Hermitian rank-2 operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a complex scalar, x and y are n element vectors, and A is an +// n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts +// of the diagonal elements are assumed to be zero, and on return they are set to zero. +func (Implementation) Zhpr2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. 
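+ // Row i of the packed upper triangle starts at ap[kk]; the diagonal is
+ // rebuilt from its real part, the off-diagonal entries receive the combined
+ // rank-two update, and kk then advances by n-i.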
+ if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk] = complex(aii, 0) + k := kk + 1 + for j := i + 1; j < n; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for k := kk + 1; k < kk+n-i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + k := kk + for j := 0; j < i; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Ztbmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular band matrix, with +// (k+1) diagonals. +func (Implementation) Ztbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. 
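+ // With a negative increment the vector is traversed backwards, so kx is
+ // the index of the logically first element of x.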
+ var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[i+j+1] * aij + } + x[i] = xi + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + for j, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[i-kk+j] * aij + } + x[i] = xi + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix -= incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+k] + } + ix += incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda]) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+k]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+k]) + } + ix += incX + } + } + } + } +} + +// Ztbsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular band matrix +// with 
(k+1) diagonals. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex128 + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[i+1+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex128 + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex128 + for j, aij := range a[i*lda+k-kk : i*lda+k] { + sum += x[i-kk+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex128 + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= a[i*lda+k] + } + ix += incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * aij + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * aij + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * aij + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * aij + jx += incX + } + ix -= incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := 
range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix -= incX + } + } + } + } +} + +// Ztpmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular matrix, supplied in +// packed form. +func (Implementation) Ztpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + x[i] += c128.DotuUnitary(ap[kk+1:kk+n-i], x[i+1:]) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + x[ix] += c128.DotuInc(ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + kk += n - i + } + } + } else { + // kk points to the beginning of current row in ap. + kk := n*(n+1)/2 - n + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + if i > 0 { + x[i] += c128.DotuUnitary(ap[kk:kk+i], x[:i]) + } + kk -= i + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + if i > 0 { + x[ix] += c128.DotuInc(ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + kk -= i + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. 
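+ // Rows are processed from last to first, so kk starts at the final element
+ // of ap, which holds the (n-1, n-1) diagonal entry.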
+ kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + c128.AxpyUnitary(xi, ap[kk+1:kk+n-i], x[i+1:n]) + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + c128.AxpyInc(xi, ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if i > 0 { + c128.AxpyUnitary(x[i], ap[kk:kk+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyInc(x[ix], ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + // Form x = A^H*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk]) + } + k := kk + 1 + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(ap[k]) + k++ + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk]) + } + jx := ix + incX + k := kk + 1 + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(ap[k]) + jx += incX + k++ + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i, xi := range x { + for j := 0; j < i; j++ { + x[j] += xi * cmplx.Conj(ap[kk+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk+i]) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + jx := kx + for j := 0; j < i; j++ { + x[jx] += xi * cmplx.Conj(ap[kk+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk+i]) + } + ix += incX + kk += i + 1 + } + } + } +} + +// Ztpsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix in +// packed form. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. 
+ var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. + if uplo == blas.Upper { + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[i] -= c128.DotuUnitary(x[i+1:n], ap[kk+1:kk+n-i]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[ix] -= c128.DotuInc(x, ap[kk+1:kk+n-i], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + kk := 0 + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c128.DotuUnitary(x[:i], ap[kk:kk+i]) + } + if diag == blas.NonUnit { + x[i] /= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c128.DotuInc(x, ap[kk:kk+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. + if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= ap[kk] + } + if n-j-1 > 0 { + c128.AxpyUnitary(-x[j], ap[kk+1:kk+n-j], x[j+1:n]) + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= ap[kk] + } + if n-j-1 > 0 { + c128.AxpyInc(-x[jx], ap[kk+1:kk+n-j], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= ap[kk+j] + } + if j > 0 { + c128.AxpyUnitary(-x[j], ap[kk:kk+j], x[:j]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= ap[kk+j] + } + if j > 0 { + c128.AxpyInc(-x[jx], ap[kk:kk+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + kk -= j + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk]) + } + xj := x[j] + k := kk + 1 + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(ap[k]) + k++ + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk]) + } + xj := x[jx] + ix := jx + incX + k := kk + 1 + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(ap[k]) + ix += incX + k++ + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(ap[kk+i]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(ap[kk+i]) + ix += incX + } + jx -= incX + kk -= j + } + } + } +} + +// Ztrmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is a vector, and A is an n×n triangular matrix. 
+func (Implementation) Ztrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[i] += c128.DotuUnitary(a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[ix] += c128.DotuInc(a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if i > 0 { + x[i] += c128.DotuUnitary(a[i*lda:i*lda+i], x[:i]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if i > 0 { + x[ix] += c128.DotuInc(a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + c128.AxpyUnitary(xi, a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + c128.AxpyInc(xi, a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyUnitary(x[i], a[i*lda:i*lda+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyInc(x[ix], a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + // Form x = A^H*x. 
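+ // The product is formed in place: rows of an upper triangular A are visited
+ // from last to first and rows of a lower triangular A from first to last, so
+ // each x[i] is still unmodified when its row is scattered into the other
+ // entries of x.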
+ if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(a[i*lda+j]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + jx := ix + incX + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(a[i*lda+j]) + jx += incX + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + x[j] += x[i] * cmplx.Conj(a[i*lda+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + jx := kx + for j := 0; j < i; j++ { + x[jx] += x[ix] * cmplx.Conj(a[i*lda+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + ix += incX + } + } + } +} + +// Ztrsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztrsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[i] -= c128.DotuUnitary(x[i+1:n], a[i*lda+i+1:i*lda+n]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[ix] -= c128.DotuInc(x, a[i*lda+i+1:i*lda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c128.DotuUnitary(x[:i], a[i*lda:i*lda+i]) + } + if diag == blas.NonUnit { + x[i] /= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c128.DotuInc(x, a[i*lda:i*lda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. 
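+ // The transposed system is solved by column-oriented substitution: x[j] is
+ // divided by the diagonal entry first and then eliminated from the remaining
+ // entries of x using row j of A (which is column j of A^T).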
+ if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + if n-j-1 > 0 { + c128.AxpyUnitary(-x[j], a[j*lda+j+1:j*lda+n], x[j+1:n]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if n-j-1 > 0 { + c128.AxpyInc(-x[jx], a[j*lda+j+1:j*lda+n], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + xj := x[j] + if j > 0 { + c128.AxpyUnitary(-xj, a[j*lda:j*lda+j], x[:j]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if j > 0 { + c128.AxpyInc(-x[jx], a[j*lda:j*lda+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := jx + incX + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx -= incX + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go new file mode 100644 index 00000000..10faf8f7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go @@ -0,0 +1,2942 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + cmplx "gonum.org/v1/gonum/internal/cmplx64" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c64" +) + +var _ blas.Complex64Level2 = Implementation{} + +// Cgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix +// with kL sub-diagonals and kU super-diagonals. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + var lenX, lenY int + if trans == blas.NoTrans { + lenX, lenY = n, m + } else { + lenX, lenY = m, n + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c64.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + nRow := min(m, n+kL) + nCol := kL + 1 + kU + switch trans { + case blas.NoTrans: + iy := ky + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) + xtmp := x[off : off+u-l] + var sum complex64 + for j, v := range aRow { + sum += xtmp[j] * v + } + y[iy] += alpha * sum + iy += incY + } + } else { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incX + jx := kx + var sum complex64 + for _, v := range aRow { + sum += x[off+jx] * v + jx += incX + } + y[iy] += alpha * sum + iy += incY + } + } + case blas.Trans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + ix += incX + } + } + case blas.ConjTrans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + ix += incX + } + } + } +} + +// Cgemv performs 
one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cgemv(trans blas.Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + var lenX, lenY int + if trans == blas.NoTrans { + lenX = n + lenY = m + } else { + lenX = m + lenY = n + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c64.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + switch trans { + default: + // Form y = alpha*A*x + y. + iy := ky + if incX == 1 { + for i := 0; i < m; i++ { + y[iy] += alpha * c64.DotuUnitary(a[i*lda:i*lda+n], x[:n]) + iy += incY + } + return + } + for i := 0; i < m; i++ { + y[iy] += alpha * c64.DotuInc(a[i*lda:i*lda+n], x, uintptr(n), 1, uintptr(incX), 0, uintptr(kx)) + iy += incY + } + return + + case blas.Trans: + // Form y = alpha*A^T*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + c64.AxpyUnitary(alpha*x[ix], a[i*lda:i*lda+n], y[:n]) + ix += incX + } + return + } + for i := 0; i < m; i++ { + c64.AxpyInc(alpha*x[ix], a[i*lda:i*lda+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + ix += incX + } + return + + case blas.ConjTrans: + // Form y = alpha*A^H*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + for j := 0; j < n; j++ { + y[j] += tmp * cmplx.Conj(a[i*lda+j]) + } + ix += incX + } + return + } + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + jy := ky + for j := 0; j < n; j++ { + y[jy] += tmp * cmplx.Conj(a[i*lda+j]) + jy += incY + } + ix += incX + } + return + } +} + +// Cgerc performs the rank-one operation +// A += alpha * x * y^H +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx, jy int + if incX < 0 { + kx = (1 - m) * incX + } + if incY < 0 { + jy = (1 - n) * incY + } + for j := 0; j < n; j++ { + if y[jy] != 0 { + tmp := alpha * cmplx.Conj(y[jy]) + c64.AxpyInc(tmp, x, a[j:], uintptr(m), uintptr(incX), uintptr(lda), uintptr(kx), 0) + } + jy += incY + } +} + +// Cgeru performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - m) * incX + } + if incY == 1 { + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c64.AxpyUnitary(tmp, y[:n], a[i*lda:i*lda+n]) + } + kx += incX + } + return + } + var jy int + if incY < 0 { + jy = (1 - n) * incY + } + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c64.AxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(jy), 0) + } + kx += incX + } +} + +// Chbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian band matrix with k super-diagonals. The imaginary parts of +// the diagonal elements of A are ignored and assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chbmv(uplo blas.Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through a. + switch uplo { + case blas.Upper: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[i] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[i+j] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += sum + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[ix] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jx := incX + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + } + case blas.Lower: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[i] + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[ix] + jx := l * incX + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + ix += incX + iy += incY + } + } + } +} + +// Chemv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix. The imaginary parts of the diagonal elements of A are +// ignored and assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chemv(uplo blas.Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through + // the triangular part of A. + + if uplo == blas.Upper { + // Form y when A is stored in upper triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex64 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex64 + jx := ix + jy := iy + for j := i + 1; j < n; j++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } + return + } + + // Form y when A is stored in lower triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex64 + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex64 + jx := kx + jy := ky + for j := 0; j < i; j++ { + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } +} + +// Cher performs the Hermitian rank-one operation +// A += alpha * x * x^H +// where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n +// element vector. On entry, the imaginary parts of the diagonal elements of A +// are ignored and assumed to be zero, on return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cher(uplo blas.Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + jx := ix + incX + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } + return + } + + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + jx := kx + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } +} + +// Cher2 performs the Hermitian rank-two operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a scalar, x and y are n element vectors and A is an n×n +// Hermitian matrix. On entry, the imaginary parts of the diagonal elements are +// ignored and assumed to be zero. On return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cher2(uplo blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + var kx, ky int + var ix, iy int + if incX != 1 || incY != 1 { + if incX < 0 { + kx = (1 - n) * incX + } + if incY < 0 { + ky = (1 - n) * incY + } + ix = kx + iy = ky + } + if uplo == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } + return + } + + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } +} + +// Chpmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix in packed form. The imaginary parts of the diagonal +// elements of A are ignored and assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chpmv(uplo blas.Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] *= beta + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form y when ap contains the upper triangle. + // Here, kk points to the current diagonal element in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + y[i] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex64 + k := kk + 1 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + y[i] += alpha * tmp2 + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + y[iy] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex64 + jx := ix + jy := iy + for k := kk + 1; k < kk+n-i; k++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + } + y[iy] += alpha * tmp2 + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form y when ap contains the lower triangle. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex64 + k := kk + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + aii := complex(real(ap[kk+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex64 + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(ap[kk+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Chpr performs the Hermitian rank-1 operation +// A += alpha * x * x^H +// where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix +// in packed form. On entry, the imaginary parts of the diagonal elements are +// assumed to be zero, and on return they are set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chpr(uplo blas.Uplo, n int, alpha float32, x []complex64, incX int, ap []complex64) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. 
+ if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + a := ap[kk+1 : kk+n-i] + x := x[i+1 : n] + for j, v := range x { + a[j] += tmp * cmplx.Conj(v) + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + jx := ix + incX + a := ap[kk+1 : kk+n-i] + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + for j, v := range x[:i] { + a[j] += tmp * cmplx.Conj(v) + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + jx := kx + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + kk += i + 1 + } + } +} + +// Chpr2 performs the Hermitian rank-2 operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a complex scalar, x and y are n element vectors, and A is an +// n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts +// of the diagonal elements are assumed to be zero, and on return they are set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chpr2(uplo blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. 
+ if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk] = complex(aii, 0) + k := kk + 1 + for j := i + 1; j < n; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for k := kk + 1; k < kk+n-i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + k := kk + for j := 0; j < i; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Ctbmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular band matrix, with +// (k+1) diagonals. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. 
+ var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[i+j+1] * aij + } + x[i] = xi + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + for j, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[i-kk+j] * aij + } + x[i] = xi + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix -= incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+k] + } + ix += incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda]) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+k]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+k]) + } + ix += incX + } + } + } + } +} + +// Ctbsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular band matrix +// with 
(k+1) diagonals. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex64 + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[i+1+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex64 + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex64 + for j, aij := range a[i*lda+k-kk : i*lda+k] { + sum += x[i-kk+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex64 + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= a[i*lda+k] + } + ix += incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * aij + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * aij + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * aij + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * aij + jx += incX + } + ix -= incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= 
cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix -= incX + } + } + } + } +} + +// Ctpmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular matrix, supplied in +// packed form. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex64, x []complex64, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + x[i] += c64.DotuUnitary(ap[kk+1:kk+n-i], x[i+1:]) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + x[ix] += c64.DotuInc(ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + kk += n - i + } + } + } else { + // kk points to the beginning of current row in ap. + kk := n*(n+1)/2 - n + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + if i > 0 { + x[i] += c64.DotuUnitary(ap[kk:kk+i], x[:i]) + } + kk -= i + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + if i > 0 { + x[ix] += c64.DotuInc(ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + kk -= i + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. 
+ kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + c64.AxpyUnitary(xi, ap[kk+1:kk+n-i], x[i+1:n]) + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + c64.AxpyInc(xi, ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if i > 0 { + c64.AxpyUnitary(x[i], ap[kk:kk+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c64.AxpyInc(x[ix], ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + // Form x = A^H*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk]) + } + k := kk + 1 + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(ap[k]) + k++ + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk]) + } + jx := ix + incX + k := kk + 1 + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(ap[k]) + jx += incX + k++ + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i, xi := range x { + for j := 0; j < i; j++ { + x[j] += xi * cmplx.Conj(ap[kk+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk+i]) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + jx := kx + for j := 0; j < i; j++ { + x[jx] += xi * cmplx.Conj(ap[kk+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk+i]) + } + ix += incX + kk += i + 1 + } + } + } +} + +// Ctpsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix in +// packed form. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex64, x []complex64, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. + if uplo == blas.Upper { + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[i] -= c64.DotuUnitary(x[i+1:n], ap[kk+1:kk+n-i]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[ix] -= c64.DotuInc(x, ap[kk+1:kk+n-i], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + kk := 0 + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c64.DotuUnitary(x[:i], ap[kk:kk+i]) + } + if diag == blas.NonUnit { + x[i] /= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c64.DotuInc(x, ap[kk:kk+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. + if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= ap[kk] + } + if n-j-1 > 0 { + c64.AxpyUnitary(-x[j], ap[kk+1:kk+n-j], x[j+1:n]) + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= ap[kk] + } + if n-j-1 > 0 { + c64.AxpyInc(-x[jx], ap[kk+1:kk+n-j], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= ap[kk+j] + } + if j > 0 { + c64.AxpyUnitary(-x[j], ap[kk:kk+j], x[:j]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= ap[kk+j] + } + if j > 0 { + c64.AxpyInc(-x[jx], ap[kk:kk+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + kk -= j + } + } + } + return + } + + // Form x = inv(A^H)*x. 
+ if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk]) + } + xj := x[j] + k := kk + 1 + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(ap[k]) + k++ + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk]) + } + xj := x[jx] + ix := jx + incX + k := kk + 1 + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(ap[k]) + ix += incX + k++ + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(ap[kk+i]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(ap[kk+i]) + ix += incX + } + jx -= incX + kk -= j + } + } + } +} + +// Ctrmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is a vector, and A is an n×n triangular matrix. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[i] += c64.DotuUnitary(a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[ix] += c64.DotuInc(a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if i > 0 { + x[i] += c64.DotuUnitary(a[i*lda:i*lda+i], x[:i]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if i > 0 { + x[ix] += c64.DotuInc(a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + c64.AxpyUnitary(xi, a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + c64.AxpyInc(xi, a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + c64.AxpyUnitary(x[i], a[i*lda:i*lda+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c64.AxpyInc(x[ix], a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + // Form x = A^H*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(a[i*lda+j]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + jx := ix + incX + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(a[i*lda+j]) + jx += incX + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + x[j] += x[i] * cmplx.Conj(a[i*lda+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + jx := kx + for j := 0; j < i; j++ { + x[jx] += x[ix] * cmplx.Conj(a[i*lda+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + ix += incX + } + } + } +} + +// Ctrsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctrsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[i] -= c64.DotuUnitary(x[i+1:n], a[i*lda+i+1:i*lda+n]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[ix] -= c64.DotuInc(x, a[i*lda+i+1:i*lda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c64.DotuUnitary(x[:i], a[i*lda:i*lda+i]) + } + if diag == blas.NonUnit { + x[i] /= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c64.DotuInc(x, a[i*lda:i*lda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. + if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + if n-j-1 > 0 { + c64.AxpyUnitary(-x[j], a[j*lda+j+1:j*lda+n], x[j+1:n]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if n-j-1 > 0 { + c64.AxpyInc(-x[jx], a[j*lda+j+1:j*lda+n], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + xj := x[j] + if j > 0 { + c64.AxpyUnitary(-xj, a[j*lda:j*lda+j], x[:j]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if j > 0 { + c64.AxpyInc(-x[jx], a[j*lda:j*lda+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := jx + incX + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx -= incX + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go new file mode 100644 index 00000000..08e1927f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go @@ -0,0 +1,2296 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level2 = Implementation{} + +// Sger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + f32.Ger(uintptr(m), uintptr(n), + alpha, + x, uintptr(incX), + y, uintptr(incY), + a, uintptr(lda)) +} + +// Sgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans +// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, +// x and y are vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + // i and j are indices of the compacted banded matrix. 
+ // off is the offset into the dense matrix (off + j = densej) + nCol := kU + 1 + kL + if tA == blas.NoTrans { + iy := ky + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + xtmp := x[off : off+u-l] + var sum float32 + for j, v := range atmp { + sum += xtmp[j] * v + } + y[iy] += sum * alpha + iy += incY + } + return + } + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + jx := kx + var sum float32 + for _, v := range atmp { + sum += x[off*incX+jx] * v + jx += incX + } + y[iy] += sum * alpha + iy += incY + } + return + } + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[i] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + } + return + } + ix := kx + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[ix] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + ix += incX + } +} + +// Strmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Strmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + nonUnit := d != blas.Unit + if n == 1 { + if nonUnit { + x[0] *= a[0] + } + return + } + var kx int + if incX <= 0 { + kx = -(n - 1) * incX + } + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f32.DotUnitary(a[ilda+i+1:ilda+n], x[i+1:n]) + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f32.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp += a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f32.DotUnitary(a[ilda:ilda+i], x[:i]) + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f32.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + ix -= incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[i] + f32.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) + if nonUnit { + x[i] *= a[ilda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[ix] + f32.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[i] + f32.AxpyUnitary(xi, a[ilda:ilda+i], x[:i]) + if nonUnit { + x[i] *= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[ix] + f32.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix += incX + } +} + +// Strsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Strsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + if n == 1 { + if d == blas.NonUnit { + x[0] /= a[0] + } + return + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + var sum float32 + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := i + j + 1 + sum += x[jv] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + var sum float32 + jx := ix + incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + var sum float32 + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + sum += x[j] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + var sum float32 + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := j + i + 1 + x[jv] -= v * xi + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + (i+1)*incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + x[j] -= v * xi + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix -= incX + } +} + +// Ssymv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssymv(ul blas.Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := x[i] * a[i*lda+i] + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jp := j + i + 1 + sum += x[jp] * v + y[jy] += xv * v + jy += incY + } + y[iy] += alpha * sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := x[ix] * a[i*lda+i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + } + return + } + // Cases where a is lower triangular. 
+ if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + jy := ky + xv := alpha * x[i] + atmp := a[i*lda : i*lda+i] + var sum float32 + for j, v := range atmp { + sum += x[j] * v + y[jy] += xv * v + jy += incY + } + sum += x[i] * a[i*lda+i] + sum *= alpha + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xv := alpha * x[ix] + atmp := a[i*lda : i*lda+i] + var sum float32 + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + sum += x[ix] * a[i*lda+i] + sum *= alpha + y[iy] += sum + ix += incX + iy += incY + } +} + +// Stbmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonunit := d != blas.Unit + + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float32 + atmp := a[i*lda:] + xtmp := x[i:] + for j := 1; j < u; j++ { + sum += xtmp[j] * atmp[j] + } + if nonunit { + sum += xtmp[0] * atmp[0] + } else { + sum += xtmp[0] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float32 + atmp := a[i*lda:] + jx := incX + for j := 1; j < u; j++ { + sum += x[ix+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[0] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float32 + for j := l; j < k; j++ { + sum += x[i-k+j] * atmp[j] + } + if nonunit { + sum += x[i] * atmp[k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float32 + jx := l * incX + for j := l; j < k; j++ { + sum += x[ix-k*incX+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[k] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float32 + for j := 1; j < u; j++ { + sum += x[i-j] * a[(i-j)*lda+j] + } + if nonunit { + sum += x[i] * a[i*lda] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float32 + jx := incX + for j := 1; j < u; j++ { + sum += x[ix-jx] * a[(i-j)*lda+j] + jx += incX + } + if nonunit { + sum += 
x[ix] * a[i*lda] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var sum float32 + for j := 0; j < u; j++ { + sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] + } + if nonunit { + sum += x[i] * a[i*lda+k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var ( + sum float32 + jx int + ) + for j := 0; j < u; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda+k] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } +} + +// Stpmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xi += v * xtmp[j] + } + x[i] = xi + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset += n - i + ix += incX + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset-i : offset] + for j, v := range atmp { + xi += v * x[j] + } + x[i] = xi + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset -= i + 1 + ix -= incX + } + return + } + // Cases where ap is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + jx := kx + (i+1)*incX + atmp := ap[offset+1 : offset+n-i] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + offset -= n - i + 1 + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + jx := kx + atmp := ap[offset-i : offset] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + ix += incX + offset += i + 2 + } +} + +// Stbsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, +// and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + // Form x = A^-1 x. + // Several cases below use subslices for speed improvement. + // The incX != 1 cases usually do not because incX may be negative. 
+ if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + atmp := a[i*lda+1:] + xtmp := x[i+1 : i+bands+1] + var sum float32 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + max := k + 1 + if i+max > n { + max = n - i + } + atmp := a[i*lda:] + var ( + jx int + sum float32 + ) + for j := 1; j < max; j++ { + jx += incX + sum += x[ix+jx] * atmp[j] + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[0] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + xtmp := x[i-bands : i] + var sum float32 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= atmp[bands] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * atmp[j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[bands] + } + ix += incX + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var sum float32 + for j := 0; j < bands; j++ { + sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda] + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var sum float32 + xtmp := x[i+1 : i+1+bands] + for j, v := range xtmp { + sum += v * a[(i+j+1)*lda+k-j-1] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+k] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+k] + } + ix -= incX + } +} + +// Ssbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric band matrix with k super-diagonals, x and y are +// vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssbmv(ul blas.Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up indexes + lenX := n + lenY := n + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[i] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[i+j] * v + y[iy+jy] += tmp * v + jy += incY + } + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[ix] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jx := incX + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + return + } + + // Casses where a has bands below the diagonal. + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[i] + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += tmp * v + jy += incY + } + y[iy] += tmp * atmp[k] + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[ix] + jx := l * incX + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += tmp * atmp[k] + ix += incX + iy += incY + } +} + +// Ssyr performs the symmetric rank-one update +// A += alpha * x * x^T +// where A is an n×n symmetric matrix, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssyr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda+i : i*lda+n] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += v * tmp + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + jx := ix + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += x[jx] * tmp + jx += incX + } + } + ix += incX + } + return + } + // Cases where a is lower triangular. + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda:] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += tmp * v + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + atmp := a[i*lda:] + jx := kx + for j := 0; j < i+1; j++ { + atmp[j] += tmp * x[jx] + jx += incX + } + } + ix += incX + } +} + +// Ssyr2 performs the symmetric rank-two update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssyr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } +} + +// Stpsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. 
Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + offset = n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + var sum float32 + for j, v := range atmp { + sum += v * xtmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix -= incX + offset -= n - i + 1 + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i : offset] + var sum float32 + for j, v := range atmp { + sum += v * x[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i : offset] + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix += incX + offset += i + 2 + } + return + } + // Cases where ap is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] -= v * xi + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] -= v * xi + } + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix -= incX + offset -= i + 1 + } +} + +// Sspmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sspmv(ul blas.Uplo, n int, alpha float32, ap []float32, x []float32, incX int, beta float32, y []float32, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * ap[0] * x[0] + return + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := ap[offset] * x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + jy := ky + (i+1)*incY + for j, v := range atmp { + sum += v * xtmp[j] + y[jy] += v * xv + jy += incY + } + y[iy] += alpha * sum + iy += incY + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := ap[offset] * x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + atmp := ap[offset-i : offset] + jy := ky + var sum float32 + for j, v := range atmp { + sum += v * x[j] + y[jy] += v * xv + jy += incY + } + sum += ap[offset] * x[i] + y[iy] += alpha * sum + iy += incY + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + atmp := ap[offset-i : offset] + jx := kx + jy := ky + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + + sum += ap[offset] * x[ix] + y[iy] += alpha * sum + ix += incX + iy += incY + offset += i + 2 + } +} + +// Sspr performs the symmetric rank-one operation +// A += alpha * x * x^T +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, ap []float32) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xv := alpha * x[i] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + i*incX + atmp := ap[offset:] + xv := alpha * x[ix] + for j := 0; j < n-i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xv := alpha * x[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i:] + xv := alpha * x[ix] + for j := 0; j <= i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += i + 2 + } +} + +// Sspr2 performs the symmetric rank-2 update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, ap []float32) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + var offset int // Offset is the index of (i,i). 
+ if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xi := x[i] + yi := y[i] + xtmp := x[i:n] + ytmp := y[i:n] + for j, v := range xtmp { + atmp[j] += alpha * (xi*ytmp[j] + v*yi) + } + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + atmp := ap[offset:] + xi := x[ix] + yi := y[iy] + for j := 0; j < n-i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xi := x[i] + yi := y[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += alpha * (xi*y[j] + v*yi) + } + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + atmp := ap[offset-i:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += i + 2 + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go new file mode 100644 index 00000000..26125788 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go @@ -0,0 +1,2264 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level2 = Implementation{} + +// Dger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func (Implementation) Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + f64.Ger(uintptr(m), uintptr(n), + alpha, + x, uintptr(incX), + y, uintptr(incY), + a, uintptr(lda)) +} + +// Dgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans +// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, +// x and y are vectors, and alpha and beta are scalars. +func (Implementation) Dgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + // i and j are indices of the compacted banded matrix. + // off is the offset into the dense matrix (off + j = densej) + nCol := kU + 1 + kL + if tA == blas.NoTrans { + iy := ky + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + xtmp := x[off : off+u-l] + var sum float64 + for j, v := range atmp { + sum += xtmp[j] * v + } + y[iy] += sum * alpha + iy += incY + } + return + } + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + jx := kx + var sum float64 + for _, v := range atmp { + sum += x[off*incX+jx] * v + jx += incX + } + y[iy] += sum * alpha + iy += incY + } + return + } + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[i] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + } + return + } + ix := kx + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[ix] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + ix += incX + } +} + +// Dtrmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x is a vector. +func (Implementation) Dtrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + nonUnit := d != blas.Unit + if n == 1 { + if nonUnit { + x[0] *= a[0] + } + return + } + var kx int + if incX <= 0 { + kx = -(n - 1) * incX + } + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f64.DotUnitary(a[ilda+i+1:ilda+n], x[i+1:n]) + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f64.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp += a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f64.DotUnitary(a[ilda:ilda+i], x[:i]) + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f64.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + ix -= incX + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[i] + f64.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) + if nonUnit { + x[i] *= a[ilda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[ix] + f64.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[i] + f64.AxpyUnitary(xi, a[ilda:ilda+i], x[:i]) + if nonUnit { + x[i] *= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[ix] + f64.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix += incX + } +} + +// Dtrsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Dtrsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + if n == 1 { + if d == blas.NonUnit { + x[0] /= a[0] + } + return + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + var sum float64 + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := i + j + 1 + sum += x[jv] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + var sum float64 + jx := ix + incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + var sum float64 + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + sum += x[j] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + var sum float64 + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := j + i + 1 + x[jv] -= v * xi + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + (i+1)*incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + x[j] -= v * xi + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix -= incX + } +} + +// Dsymv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +func (Implementation) Dsymv(ul blas.Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. 
+ if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := x[i] * a[i*lda+i] + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jp := j + i + 1 + sum += x[jp] * v + y[jy] += xv * v + jy += incY + } + y[iy] += alpha * sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := x[ix] * a[i*lda+i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + } + return + } + // Cases where a is lower triangular. + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + jy := ky + xv := alpha * x[i] + atmp := a[i*lda : i*lda+i] + var sum float64 + for j, v := range atmp { + sum += x[j] * v + y[jy] += xv * v + jy += incY + } + sum += x[i] * a[i*lda+i] + sum *= alpha + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xv := alpha * x[ix] + atmp := a[i*lda : i*lda+i] + var sum float64 + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + sum += x[ix] * a[i*lda+i] + sum *= alpha + y[iy] += sum + ix += incX + iy += incY + } +} + +// Dtbmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. +func (Implementation) Dtbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonunit := d != blas.Unit + + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float64 + atmp := a[i*lda:] + xtmp := x[i:] + for j := 1; j < u; j++ { + sum += xtmp[j] * atmp[j] + } + if nonunit { + sum += xtmp[0] * atmp[0] + } else { + sum += xtmp[0] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float64 + atmp := a[i*lda:] + jx := incX + for j := 1; j < u; j++ { + sum += x[ix+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[0] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float64 + for j := l; j < k; j++ { + sum += x[i-k+j] * atmp[j] + } + if nonunit { + sum += x[i] * atmp[k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float64 + jx := l * incX + for j := l; j < k; j++ { + sum += x[ix-k*incX+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[k] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float64 + for j := 1; j < u; j++ { + sum += x[i-j] * a[(i-j)*lda+j] + } + if nonunit { + sum += x[i] * a[i*lda] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float64 + jx := incX + for j := 1; j < u; j++ { + sum += x[ix-jx] * a[(i-j)*lda+j] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var sum float64 + for j := 0; j < u; j++ { + sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] + } + if nonunit { + sum += x[i] * a[i*lda+k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var ( + sum float64 + jx int + ) + for j := 0; j < u; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda+k] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } +} + +// Dtpmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x is a vector. +func (Implementation) Dtpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xi += v * xtmp[j] + } + x[i] = xi + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset += n - i + ix += incX + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset-i : offset] + for j, v := range atmp { + xi += v * x[j] + } + x[i] = xi + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset -= i + 1 + ix -= incX + } + return + } + // Cases where ap is transposed. + if ul == blas.Upper { + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + jx := kx + (i+1)*incX + atmp := ap[offset+1 : offset+n-i] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + offset -= n - i + 1 + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + jx := kx + atmp := ap[offset-i : offset] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + ix += incX + offset += i + 2 + } +} + +// Dtbsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, +// and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Dtbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + // Form x = A^-1 x. + // Several cases below use subslices for speed improvement. + // The incX != 1 cases usually do not because incX may be negative. + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + atmp := a[i*lda+1:] + xtmp := x[i+1 : i+bands+1] + var sum float64 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + max := k + 1 + if i+max > n { + max = n - i + } + atmp := a[i*lda:] + var ( + jx int + sum float64 + ) + for j := 1; j < max; j++ { + jx += incX + sum += x[ix+jx] * atmp[j] + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[0] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + xtmp := x[i-bands : i] + var sum float64 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= atmp[bands] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * atmp[j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[bands] + } + ix += incX + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var sum float64 + for j := 0; j < bands; j++ { + sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda] + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var sum float64 + xtmp := x[i+1 : i+1+bands] + for j, v := range xtmp { + sum += v * a[(i+j+1)*lda+k-j-1] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+k] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+k] + } + ix -= incX + } +} + +// Dsbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric band matrix with k super-diagonals, x and y are +// vectors, and alpha and beta are scalars. 
+func (Implementation) Dsbmv(ul blas.Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up indexes + lenX := n + lenY := n + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[i] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[i+j] * v + y[iy+jy] += tmp * v + jy += incY + } + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[ix] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jx := incX + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + return + } + + // Casses where a has bands below the diagonal. + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[i] + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += tmp * v + jy += incY + } + y[iy] += tmp * atmp[k] + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[ix] + jx := l * incX + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += tmp * atmp[k] + ix += incX + iy += incY + } +} + +// Dsyr performs the symmetric rank-one update +// A += alpha * x * x^T +// where A is an n×n symmetric matrix, and x is a vector. +func (Implementation) Dsyr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda+i : i*lda+n] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += v * tmp + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + jx := ix + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += x[jx] * tmp + jx += incX + } + } + ix += incX + } + return + } + // Cases where a is lower triangular. + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda:] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += tmp * v + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + atmp := a[i*lda:] + jx := kx + for j := 0; j < i+1; j++ { + atmp[j] += tmp * x[jx] + jx += incX + } + } + ix += incX + } +} + +// Dsyr2 performs the symmetric rank-two update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. +func (Implementation) Dsyr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } +} + +// Dtpsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. 
+// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Dtpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + offset = n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + var sum float64 + for j, v := range atmp { + sum += v * xtmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix -= incX + offset -= n - i + 1 + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i : offset] + var sum float64 + for j, v := range atmp { + sum += v * x[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i : offset] + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix += incX + offset += i + 2 + } + return + } + // Cases where ap is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] -= v * xi + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] -= v * xi + } + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix -= incX + offset -= i + 1 + } +} + +// Dspmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. 
+func (Implementation) Dspmv(ul blas.Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * ap[0] * x[0] + return + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := ap[offset] * x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + jy := ky + (i+1)*incY + for j, v := range atmp { + sum += v * xtmp[j] + y[jy] += v * xv + jy += incY + } + y[iy] += alpha * sum + iy += incY + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := ap[offset] * x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + atmp := ap[offset-i : offset] + jy := ky + var sum float64 + for j, v := range atmp { + sum += v * x[j] + y[jy] += v * xv + jy += incY + } + sum += ap[offset] * x[i] + y[iy] += alpha * sum + iy += incY + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + atmp := ap[offset-i : offset] + jx := kx + jy := ky + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + + sum += ap[offset] * x[ix] + y[iy] += alpha * sum + ix += incX + iy += incY + offset += i + 2 + } +} + +// Dspr performs the symmetric rank-one operation +// A += alpha * x * x^T +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +func (Implementation) Dspr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, ap []float64) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xv := alpha * x[i] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + i*incX + atmp := ap[offset:] + xv := alpha * x[ix] + for j := 0; j < n-i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xv := alpha * x[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i:] + xv := alpha * x[ix] + for j := 0; j <= i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += i + 2 + } +} + +// Dspr2 performs the symmetric rank-2 update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func (Implementation) Dspr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, ap []float64) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + var offset int // Offset is the index of (i,i). 
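+ // In row-major packed storage the upper triangle keeps row i in
+ // ap[offset:offset+n-i] (columns i..n-1), so the diagonal index advances by
+ // n-i per row, while the lower triangle keeps row i in ap[offset-i:offset+1]
+ // (columns 0..i), so it advances by i+2.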
+ if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xi := x[i] + yi := y[i] + xtmp := x[i:n] + ytmp := y[i:n] + for j, v := range xtmp { + atmp[j] += alpha * (xi*ytmp[j] + v*yi) + } + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + atmp := ap[offset:] + xi := x[ix] + yi := y[iy] + for j := 0; j < n-i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xi := x[i] + yi := y[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += alpha * (xi*y[j] + v*yi) + } + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + atmp := ap[offset-i:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += i + 2 + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go new file mode 100644 index 00000000..e4a2bb5e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go @@ -0,0 +1,1715 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math/cmplx" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c128" +) + +var _ blas.Complex128Level3 = Implementation{} + +// Zgemm performs one of the matrix-matrix operations +// C = alpha * op(A) * op(B) + beta * C +// where op(X) is one of +// op(X) = X or op(X) = X^T or op(X) = X^H, +// alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, +// op(B) a k×n matrix and C an m×n matrix. +func (Implementation) Zgemm(tA, tB blas.Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + } + rowA, colA := m, k + if tA != blas.NoTrans { + rowA, colA = k, m + } + if lda < max(1, colA) { + panic(badLdA) + } + rowB, colB := k, n + if tB != blas.NoTrans { + rowB, colB = n, k + } + if ldb < max(1, colB) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(b) < (rowB-1)*ldb+colB { + panic(shortB) + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + } + return + } + + switch tA { + case blas.NoTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A * B + beta * C. 
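+ // Row i of C is first scaled by beta (or zeroed), then for each l the
+ // scalar alpha*a[i,l] times row l of B is accumulated into it, keeping
+ // every inner loop contiguous in the row-major layout.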
+ for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[l*ldb+j] + } + } + } + case blas.Trans: + // Form C = alpha * A * B^T + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[j*ldb+l] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A * B^H + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * cmplx.Conj(b[j*ldb+l]) + } + } + } + } + case blas.Trans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^T * B + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^T * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^T * B^H + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + case blas.ConjTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^H * B + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^H * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^H * B^H + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Zhemm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B +// and C are m×n matrices. The imaginary parts of the diagonal elements of A are +// assumed to be zero. 
+func (Implementation) Zhemm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. + for i := 0; i < m; i++ { + atmp := alpha * complex(real(a[i*lda+i]), 0) + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Zherk performs one of the hermitian rank-k operations +// C = alpha*A*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are real scalars, C is an n×n hermitian matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. 
+func (Implementation) Zherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.ConjTrans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c128.DscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c128.DscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + calpha := complex(alpha, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*A^H + beta*C. + cbeta := complex(beta, 0) + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the i-th diagonal element of C. + ci[0] = complex(alpha*real(c128.DotcUnitary(ai, ai)), 0) + // Handle the remaining elements on the i-th row of C. + for jc := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha * c128.DotcUnitary(a[j*lda:j*lda+k], ai) + } + case beta != 1: + cii := calpha*c128.DotcUnitary(ai, ai) + cbeta*ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + default: + cii := calpha*c128.DotcUnitary(ai, ai) + ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the first i-1 elements on the i-th row of C. + for j := range ci[:i] { + ci[j] = calpha * c128.DotcUnitary(a[j*lda:j*lda+k], ai) + } + // Handle the i-th diagonal element of C. + ci[i] = complex(alpha*real(c128.DotcUnitary(ai, ai)), 0) + case beta != 1: + for j, cij := range ci[:i] { + ci[j] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + cii := calpha*c128.DotcUnitary(ai, ai) + cbeta*ci[i] + ci[i] = complex(real(cii), 0) + default: + for j, cij := range ci[:i] { + ci[j] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + cii := calpha*c128.DotcUnitary(ai, ai) + ci[i] + ci[i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*A + beta*C. 
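+ // Row i of C is beta-scaled with its diagonal kept real, then for every row
+ // j of A the conjugated entry a[j,i] scales the part of that row matching the
+ // stored columns of C (i..n-1 for Upper, 0..i for Lower), which is folded into
+ // row i of C with an axpy; the diagonal is forced back to a real value at the end.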
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c128.AxpyUnitary(calpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c128.AxpyUnitary(calpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } + } +} + +// Zher2k performs one of the hermitian rank-2k operations +// C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are scalars with beta real, C is an n×n hermitian matrix +// and A and B are n×k matrices in the first case and k×n matrices in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. +func (Implementation) Zher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.ConjTrans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c128.DscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c128.DscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + conjalpha := cmplx.Conj(alpha) + cbeta := complex(beta, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C. 
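+ // Every stored element C[i,j] is assembled from two conjugated dot products
+ // over rows of A and B, alpha*dotc(b_j, a_i) + conj(alpha)*dotc(a_j, b_i),
+ // added to beta*C[i,j] when beta is nonzero; only the real part of each
+ // diagonal element is kept.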
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i+1 : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + for jc := range ci { + j := i + 1 + jc + ci[jc] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + } + } else { + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + for jc, cij := range ci { + j := i + 1 + jc + ci[jc] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + } + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + } else { + for j, cij := range ci { + ci[j] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c128.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda+i:j*lda+n], ci) + } + } + ci[0] = complex(real(ci[0]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c128.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda:j*lda+i+1], ci) + } + } + ci[i] = complex(real(ci[i]), 0) + } + } + } +} + +// Zsymm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B +// and C are m×n matrices. +func (Implementation) Zsymm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * a[k*lda+i] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[k*lda+i] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Zsyrk performs one of the symmetric rank-k operations +// C = alpha*A*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. +func (Implementation) Zsyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.Trans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c128.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + for jc, cij := range ci { + j := i + jc + ci[jc] = beta*cij + alpha*c128.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + for j, cij := range ci { + ci[j] = beta*cij + alpha*c128.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } + } else { + // Form C = alpha*A^T*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Zsyr2k performs one of the symmetric rank-2k operations +// C = alpha*A*B^T + alpha*B*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*B + alpha*B^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A and B +// are n×k matrices in the first case and k×n matrices in the second case. +func (Implementation) Zsyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.Trans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c128.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*B^T + alpha*B*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for jc := range ci { + j := i + jc + ci[jc] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for jc, cij := range ci { + j := i + jc + ci[jc] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for j, cij := range ci { + ci[j] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } + } else { + // Form C = alpha*A^T*B + alpha*B^T*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c128.AxpyUnitary(alpha*bji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c128.AxpyUnitary(alpha*bji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Ztrmm performs one of the matrix-matrix operations +// B = alpha * op(A) * B if side == blas.Left, +// B = alpha * B * op(A) if side == blas.Right, +// where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, +// upper or lower triangular matrix and op(A) is one of +// op(A) = A if trans == blas.NoTrans, +// op(A) = A^T if trans == blas.Trans, +// op(A) = A^H if trans == blas.ConjTrans. 
+func (Implementation) Ztrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTranspose) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (na-1)*lda+na { + panic(shortA) + } + if len(b) < (m-1)*ldb+n { + panic(shortB) + } + + // Quick return if possible. + if alpha == 0 { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] = 0 + } + } + return + } + + noConj := trans != blas.ConjTrans + noUnit := diag == blas.NonUnit + if side == blas.Left { + if trans == blas.NoTrans { + // Form B = alpha*A*B. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + aii := alpha + if noUnit { + aii *= a[i*lda+i] + } + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] *= aii + } + for ja, aij := range a[i*lda+i+1 : i*lda+m] { + j := ja + i + 1 + if aij != 0 { + c128.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) + } + } + } + } else { + for i := m - 1; i >= 0; i-- { + aii := alpha + if noUnit { + aii *= a[i*lda+i] + } + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] *= aii + } + for j, aij := range a[i*lda : i*lda+i] { + if aij != 0 { + c128.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) + } + } + } + } + } else { + // Form B = alpha*A^T*B or B = alpha*A^H*B. + if uplo == blas.Upper { + for k := m - 1; k >= 0; k-- { + bk := b[k*ldb : k*ldb+n] + for ja, ajk := range a[k*lda+k+1 : k*lda+m] { + if ajk == 0 { + continue + } + j := k + 1 + ja + if noConj { + c128.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) + } + } + akk := alpha + if noUnit { + if noConj { + akk *= a[k*lda+k] + } else { + akk *= cmplx.Conj(a[k*lda+k]) + } + } + if akk != 1 { + c128.ScalUnitary(akk, bk) + } + } + } else { + for k := 0; k < m; k++ { + bk := b[k*ldb : k*ldb+n] + for j, ajk := range a[k*lda : k*lda+k] { + if ajk == 0 { + continue + } + if noConj { + c128.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) + } + } + akk := alpha + if noUnit { + if noConj { + akk *= a[k*lda+k] + } else { + akk *= cmplx.Conj(a[k*lda+k]) + } + } + if akk != 1 { + c128.ScalUnitary(akk, bk) + } + } + } + } + } else { + if trans == blas.NoTrans { + // Form B = alpha*B*A. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + abik := alpha * bi[k] + if abik == 0 { + continue + } + bi[k] = abik + if noUnit { + bi[k] *= a[k*lda+k] + } + c128.AxpyUnitary(abik, a[k*lda+k+1:k*lda+n], bi[k+1:]) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + abik := alpha * bi[k] + if abik == 0 { + continue + } + bi[k] = abik + if noUnit { + bi[k] *= a[k*lda+k] + } + c128.AxpyUnitary(abik, a[k*lda:k*lda+k], bi[:k]) + } + } + } + } else { + // Form B = alpha*B*A^T or B = alpha*B*A^H. 
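+ // Rows of B are rewritten in place: entry j becomes alpha times the diagonal
+ // term a[j,j]*b[i,j] (with a[j,j] conjugated under ConjTrans and treated as 1
+ // for a unit diagonal) plus a dot product of the stored off-diagonal part of
+ // row j of A with the not yet updated tail (Upper) or head (Lower) of row i of B.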
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j, bij := range bi { + if noConj { + if noUnit { + bij *= a[j*lda+j] + } + bij += c128.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } else { + if noUnit { + bij *= cmplx.Conj(a[j*lda+j]) + } + bij += c128.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } + bi[j] = alpha * bij + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + bij := bi[j] + if noConj { + if noUnit { + bij *= a[j*lda+j] + } + bij += c128.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) + } else { + if noUnit { + bij *= cmplx.Conj(a[j*lda+j]) + } + bij += c128.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) + } + bi[j] = alpha * bij + } + } + } + } + } +} + +// Ztrsm solves one of the matrix equations +// op(A) * X = alpha * B if side == blas.Left, +// X * op(A) = alpha * B if side == blas.Right, +// where alpha is a scalar, X and B are m×n matrices, A is a unit or +// non-unit, upper or lower triangular matrix and op(A) is one of +// op(A) = A if transA == blas.NoTrans, +// op(A) = A^T if transA == blas.Trans, +// op(A) = A^H if transA == blas.ConjTrans. +// On return the matrix X is overwritten on B. +func (Implementation) Ztrsm(side blas.Side, uplo blas.Uplo, transA blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case transA != blas.NoTrans && transA != blas.Trans && transA != blas.ConjTrans: + panic(badTranspose) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (na-1)*lda+na { + panic(shortA) + } + if len(b) < (m-1)*ldb+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + b[i*ldb+j] = 0 + } + } + return + } + + noConj := transA != blas.ConjTrans + noUnit := diag == blas.NonUnit + if side == blas.Left { + if transA == blas.NoTrans { + // Form B = alpha*inv(A)*B. + if uplo == blas.Upper { + for i := m - 1; i >= 0; i-- { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for ka, aik := range a[i*lda+i+1 : i*lda+m] { + k := i + 1 + ka + if aik != 0 { + c128.AxpyUnitary(-aik, b[k*ldb:k*ldb+n], bi) + } + } + if noUnit { + c128.ScalUnitary(1/a[i*lda+i], bi) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for j, aij := range a[i*lda : i*lda+i] { + if aij != 0 { + c128.AxpyUnitary(-aij, b[j*ldb:j*ldb+n], bi) + } + } + if noUnit { + c128.ScalUnitary(1/a[i*lda+i], bi) + } + } + } + } else { + // Form B = alpha*inv(A^T)*B or B = alpha*inv(A^H)*B. 
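+ // This is a substitution sweep on the transposed (or conjugated) system: row
+ // i of B is divided by its diagonal pivot, its contribution is subtracted via
+ // axpy from every later row (Upper) or earlier row (Lower) of B, and the row
+ // is scaled by alpha last.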
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if noUnit { + if noConj { + c128.ScalUnitary(1/a[i*lda+i], bi) + } else { + c128.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) + } + } + for ja, aij := range a[i*lda+i+1 : i*lda+m] { + if aij == 0 { + continue + } + j := i + 1 + ja + if noConj { + c128.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) + } + } + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + } + } else { + for i := m - 1; i >= 0; i-- { + bi := b[i*ldb : i*ldb+n] + if noUnit { + if noConj { + c128.ScalUnitary(1/a[i*lda+i], bi) + } else { + c128.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) + } + } + for j, aij := range a[i*lda : i*lda+i] { + if aij == 0 { + continue + } + if noConj { + c128.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) + } + } + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + } + } + } + } else { + if transA == blas.NoTrans { + // Form B = alpha*B*inv(A). + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for j, bij := range bi { + if bij == 0 { + continue + } + if noUnit { + bi[j] /= a[j*lda+j] + } + c128.AxpyUnitary(-bi[j], a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for j := n - 1; j >= 0; j-- { + if bi[j] == 0 { + continue + } + if noUnit { + bi[j] /= a[j*lda+j] + } + c128.AxpyUnitary(-bi[j], a[j*lda:j*lda+j], bi[:j]) + } + } + } + } else { + // Form B = alpha*B*inv(A^T) or B = alpha*B*inv(A^H). + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + bij := alpha * bi[j] + if noConj { + bij -= c128.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + if noUnit { + bij /= a[j*lda+j] + } + } else { + bij -= c128.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + if noUnit { + bij /= cmplx.Conj(a[j*lda+j]) + } + } + bi[j] = bij + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j, bij := range bi { + bij *= alpha + if noConj { + bij -= c128.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) + if noUnit { + bij /= a[j*lda+j] + } + } else { + bij -= c128.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) + if noUnit { + bij /= cmplx.Conj(a[j*lda+j]) + } + } + bi[j] = bij + } + } + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go new file mode 100644 index 00000000..436c5450 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go @@ -0,0 +1,1735 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + cmplx "gonum.org/v1/gonum/internal/cmplx64" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c64" +) + +var _ blas.Complex64Level3 = Implementation{} + +// Cgemm performs one of the matrix-matrix operations +// C = alpha * op(A) * op(B) + beta * C +// where op(X) is one of +// op(X) = X or op(X) = X^T or op(X) = X^H, +// alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, +// op(B) a k×n matrix and C an m×n matrix. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cgemm(tA, tB blas.Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + } + rowA, colA := m, k + if tA != blas.NoTrans { + rowA, colA = k, m + } + if lda < max(1, colA) { + panic(badLdA) + } + rowB, colB := k, n + if tB != blas.NoTrans { + rowB, colB = n, k + } + if ldb < max(1, colB) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(b) < (rowB-1)*ldb+colB { + panic(shortB) + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + } + return + } + + switch tA { + case blas.NoTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A * B + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[l*ldb+j] + } + } + } + case blas.Trans: + // Form C = alpha * A * B^T + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[j*ldb+l] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A * B^H + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * cmplx.Conj(b[j*ldb+l]) + } + } + } + } + case blas.Trans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^T * B + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^T * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^T * B^H + beta * C. 
+ for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + case blas.ConjTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^H * B + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^H * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^H * B^H + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Chemm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B +// and C are m×n matrices. The imaginary parts of the diagonal elements of A are +// assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chemm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. 
+ for i := 0; i < m; i++ { + atmp := alpha * complex(real(a[i*lda+i]), 0) + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Cherk performs one of the hermitian rank-k operations +// C = alpha*A*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are real scalars, C is an n×n hermitian matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.ConjTrans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c64.SscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c64.SscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + calpha := complex(alpha, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*A^H + beta*C. + cbeta := complex(beta, 0) + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the i-th diagonal element of C. + ci[0] = complex(alpha*real(c64.DotcUnitary(ai, ai)), 0) + // Handle the remaining elements on the i-th row of C. + for jc := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha * c64.DotcUnitary(a[j*lda:j*lda+k], ai) + } + case beta != 1: + cii := calpha*c64.DotcUnitary(ai, ai) + cbeta*ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + default: + cii := calpha*c64.DotcUnitary(ai, ai) + ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the first i-1 elements on the i-th row of C. + for j := range ci[:i] { + ci[j] = calpha * c64.DotcUnitary(a[j*lda:j*lda+k], ai) + } + // Handle the i-th diagonal element of C. + ci[i] = complex(alpha*real(c64.DotcUnitary(ai, ai)), 0) + case beta != 1: + for j, cij := range ci[:i] { + ci[j] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + cii := calpha*c64.DotcUnitary(ai, ai) + cbeta*ci[i] + ci[i] = complex(real(cii), 0) + default: + for j, cij := range ci[:i] { + ci[j] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + cii := calpha*c64.DotcUnitary(ai, ai) + ci[i] + ci[i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c64.AxpyUnitary(calpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c64.AxpyUnitary(calpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } + } +} + +// Cher2k performs one of the hermitian rank-2k operations +// C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are scalars with beta real, C is an n×n hermitian matrix +// and A and B are n×k matrices in the first case and k×n matrices in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.ConjTrans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c64.SscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c64.SscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + conjalpha := cmplx.Conj(alpha) + cbeta := complex(beta, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i+1 : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + for jc := range ci { + j := i + 1 + jc + ci[jc] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + } + } else { + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + for jc, cij := range ci { + j := i + 1 + jc + ci[jc] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + } + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + } else { + for j, cij := range ci { + ci[j] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c64.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda+i:j*lda+n], ci) + } + } + ci[0] = complex(real(ci[0]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c64.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda:j*lda+i+1], ci) + } + } + ci[i] = complex(real(ci[i]), 0) + } + } + } +} + +// Csymm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B +// and C are m×n matrices. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Csymm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * a[k*lda+i] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[k*lda+i] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Csyrk performs one of the symmetric rank-k operations +// C = alpha*A*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Csyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.Trans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c64.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + for jc, cij := range ci { + j := i + jc + ci[jc] = beta*cij + alpha*c64.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + for j, cij := range ci { + ci[j] = beta*cij + alpha*c64.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } + } else { + // Form C = alpha*A^T*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Csyr2k performs one of the symmetric rank-2k operations +// C = alpha*A*B^T + alpha*B*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*B + alpha*B^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A and B +// are n×k matrices in the first case and k×n matrices in the second case. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Csyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.Trans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c64.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*B^T + alpha*B*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for jc := range ci { + j := i + jc + ci[jc] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for jc, cij := range ci { + j := i + jc + ci[jc] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for j, cij := range ci { + ci[j] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } + } else { + // Form C = alpha*A^T*B + alpha*B^T*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c64.AxpyUnitary(alpha*bji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c64.AxpyUnitary(alpha*bji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Ctrmm performs one of the matrix-matrix operations +// B = alpha * op(A) * B if side == blas.Left, +// B = alpha * B * op(A) if side == blas.Right, +// where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, +// upper or lower triangular matrix and op(A) is one of +// op(A) = A if trans == blas.NoTrans, +// op(A) = A^T if trans == blas.Trans, +// op(A) = A^H if trans == blas.ConjTrans. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Ctrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTranspose) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (na-1)*lda+na { + panic(shortA) + } + if len(b) < (m-1)*ldb+n { + panic(shortB) + } + + // Quick return if possible. + if alpha == 0 { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] = 0 + } + } + return + } + + noConj := trans != blas.ConjTrans + noUnit := diag == blas.NonUnit + if side == blas.Left { + if trans == blas.NoTrans { + // Form B = alpha*A*B. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + aii := alpha + if noUnit { + aii *= a[i*lda+i] + } + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] *= aii + } + for ja, aij := range a[i*lda+i+1 : i*lda+m] { + j := ja + i + 1 + if aij != 0 { + c64.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) + } + } + } + } else { + for i := m - 1; i >= 0; i-- { + aii := alpha + if noUnit { + aii *= a[i*lda+i] + } + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] *= aii + } + for j, aij := range a[i*lda : i*lda+i] { + if aij != 0 { + c64.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) + } + } + } + } + } else { + // Form B = alpha*A^T*B or B = alpha*A^H*B. + if uplo == blas.Upper { + for k := m - 1; k >= 0; k-- { + bk := b[k*ldb : k*ldb+n] + for ja, ajk := range a[k*lda+k+1 : k*lda+m] { + if ajk == 0 { + continue + } + j := k + 1 + ja + if noConj { + c64.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) + } else { + c64.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) + } + } + akk := alpha + if noUnit { + if noConj { + akk *= a[k*lda+k] + } else { + akk *= cmplx.Conj(a[k*lda+k]) + } + } + if akk != 1 { + c64.ScalUnitary(akk, bk) + } + } + } else { + for k := 0; k < m; k++ { + bk := b[k*ldb : k*ldb+n] + for j, ajk := range a[k*lda : k*lda+k] { + if ajk == 0 { + continue + } + if noConj { + c64.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) + } else { + c64.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) + } + } + akk := alpha + if noUnit { + if noConj { + akk *= a[k*lda+k] + } else { + akk *= cmplx.Conj(a[k*lda+k]) + } + } + if akk != 1 { + c64.ScalUnitary(akk, bk) + } + } + } + } + } else { + if trans == blas.NoTrans { + // Form B = alpha*B*A. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + abik := alpha * bi[k] + if abik == 0 { + continue + } + bi[k] = abik + if noUnit { + bi[k] *= a[k*lda+k] + } + c64.AxpyUnitary(abik, a[k*lda+k+1:k*lda+n], bi[k+1:]) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + abik := alpha * bi[k] + if abik == 0 { + continue + } + bi[k] = abik + if noUnit { + bi[k] *= a[k*lda+k] + } + c64.AxpyUnitary(abik, a[k*lda:k*lda+k], bi[:k]) + } + } + } + } else { + // Form B = alpha*B*A^T or B = alpha*B*A^H. 
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j, bij := range bi { + if noConj { + if noUnit { + bij *= a[j*lda+j] + } + bij += c64.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } else { + if noUnit { + bij *= cmplx.Conj(a[j*lda+j]) + } + bij += c64.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } + bi[j] = alpha * bij + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + bij := bi[j] + if noConj { + if noUnit { + bij *= a[j*lda+j] + } + bij += c64.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) + } else { + if noUnit { + bij *= cmplx.Conj(a[j*lda+j]) + } + bij += c64.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) + } + bi[j] = alpha * bij + } + } + } + } + } +} + +// Ctrsm solves one of the matrix equations +// op(A) * X = alpha * B if side == blas.Left, +// X * op(A) = alpha * B if side == blas.Right, +// where alpha is a scalar, X and B are m×n matrices, A is a unit or +// non-unit, upper or lower triangular matrix and op(A) is one of +// op(A) = A if transA == blas.NoTrans, +// op(A) = A^T if transA == blas.Trans, +// op(A) = A^H if transA == blas.ConjTrans. +// On return the matrix X is overwritten on B. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctrsm(side blas.Side, uplo blas.Uplo, transA blas.Transpose, diag blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case transA != blas.NoTrans && transA != blas.Trans && transA != blas.ConjTrans: + panic(badTranspose) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (na-1)*lda+na { + panic(shortA) + } + if len(b) < (m-1)*ldb+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + b[i*ldb+j] = 0 + } + } + return + } + + noConj := transA != blas.ConjTrans + noUnit := diag == blas.NonUnit + if side == blas.Left { + if transA == blas.NoTrans { + // Form B = alpha*inv(A)*B. + if uplo == blas.Upper { + for i := m - 1; i >= 0; i-- { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c64.ScalUnitary(alpha, bi) + } + for ka, aik := range a[i*lda+i+1 : i*lda+m] { + k := i + 1 + ka + if aik != 0 { + c64.AxpyUnitary(-aik, b[k*ldb:k*ldb+n], bi) + } + } + if noUnit { + c64.ScalUnitary(1/a[i*lda+i], bi) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c64.ScalUnitary(alpha, bi) + } + for j, aij := range a[i*lda : i*lda+i] { + if aij != 0 { + c64.AxpyUnitary(-aij, b[j*ldb:j*ldb+n], bi) + } + } + if noUnit { + c64.ScalUnitary(1/a[i*lda+i], bi) + } + } + } + } else { + // Form B = alpha*inv(A^T)*B or B = alpha*inv(A^H)*B. 
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if noUnit { + if noConj { + c64.ScalUnitary(1/a[i*lda+i], bi) + } else { + c64.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) + } + } + for ja, aij := range a[i*lda+i+1 : i*lda+m] { + if aij == 0 { + continue + } + j := i + 1 + ja + if noConj { + c64.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) + } else { + c64.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) + } + } + if alpha != 1 { + c64.ScalUnitary(alpha, bi) + } + } + } else { + for i := m - 1; i >= 0; i-- { + bi := b[i*ldb : i*ldb+n] + if noUnit { + if noConj { + c64.ScalUnitary(1/a[i*lda+i], bi) + } else { + c64.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) + } + } + for j, aij := range a[i*lda : i*lda+i] { + if aij == 0 { + continue + } + if noConj { + c64.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) + } else { + c64.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) + } + } + if alpha != 1 { + c64.ScalUnitary(alpha, bi) + } + } + } + } + } else { + if transA == blas.NoTrans { + // Form B = alpha*B*inv(A). + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c64.ScalUnitary(alpha, bi) + } + for j, bij := range bi { + if bij == 0 { + continue + } + if noUnit { + bi[j] /= a[j*lda+j] + } + c64.AxpyUnitary(-bi[j], a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c64.ScalUnitary(alpha, bi) + } + for j := n - 1; j >= 0; j-- { + if bi[j] == 0 { + continue + } + if noUnit { + bi[j] /= a[j*lda+j] + } + c64.AxpyUnitary(-bi[j], a[j*lda:j*lda+j], bi[:j]) + } + } + } + } else { + // Form B = alpha*B*inv(A^T) or B = alpha*B*inv(A^H). + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + bij := alpha * bi[j] + if noConj { + bij -= c64.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + if noUnit { + bij /= a[j*lda+j] + } + } else { + bij -= c64.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + if noUnit { + bij /= cmplx.Conj(a[j*lda+j]) + } + } + bi[j] = bij + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j, bij := range bi { + bij *= alpha + if noConj { + bij -= c64.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) + if noUnit { + bij /= a[j*lda+j] + } + } else { + bij -= c64.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) + if noUnit { + bij /= cmplx.Conj(a[j*lda+j]) + } + } + bi[j] = bij + } + } + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go new file mode 100644 index 00000000..13c4a792 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go @@ -0,0 +1,876 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level3 = Implementation{} + +// Strsm solves one of the matrix equations +// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left +// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right +// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a +// scalar. 
+// +// At entry to the function, X contains the values of B, and the result is +// stored in-place into X. +// +// No check is made that A is invertible. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Strsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := m - 1; i >= 0; i-- { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f32.ScalUnitary(alpha, btmp) + } + for ka, va := range a[i*lda+i+1 : i*lda+m] { + if va != 0 { + k := ka + i + 1 + f32.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + f32.ScalUnitary(tmp, btmp) + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f32.ScalUnitary(alpha, btmp) + } + for k, va := range a[i*lda : i*lda+i] { + if va != 0 { + f32.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + f32.ScalUnitary(tmp, btmp) + } + } + return + } + // Cases where a is transposed + if ul == blas.Upper { + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + f32.ScalUnitary(tmp, btmpk) + } + for ia, va := range a[k*lda+k+1 : k*lda+m] { + if va != 0 { + i := ia + k + 1 + f32.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) + } + } + if alpha != 1 { + f32.ScalUnitary(alpha, btmpk) + } + } + return + } + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + f32.ScalUnitary(tmp, btmpk) + } + for i, va := range a[k*lda : k*lda+k] { + if va != 0 { + f32.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) + } + } + if alpha != 1 { + f32.ScalUnitary(alpha, btmpk) + } + } + return + } + // Cases where a is to the right of X. + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f32.ScalUnitary(alpha, btmp) + } + for k, vb := range btmp { + if vb == 0 { + continue + } + if nonUnit { + btmp[k] /= a[k*lda+k] + } + f32.AxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmp[k+1:n]) + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f32.ScalUnitary(alpha, btmp) + } + for k := n - 1; k >= 0; k-- { + if btmp[k] == 0 { + continue + } + if nonUnit { + btmp[k] /= a[k*lda+k] + } + f32.AxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp[:k]) + } + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := 0; j < n; j++ { + tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } +} + +// Ssymm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C if side == blas.Left +// C = alpha * B * A + beta * C if side == blas.Right +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha +// is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssymm(s blas.Side, ul blas.Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { + if s != blas.Right && s != blas.Left { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] *= beta + } + } + return + } + + isUpper := ul == blas.Upper + if s == blas.Left { + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + btmp := b[i*ldb : i*ldb+n] + ctmp := c[i*ldc : i*ldc+n] + for j, v := range btmp { + ctmp[j] *= beta + ctmp[j] += atmp * v + } + + for k := 0; k < i; k++ { + var atmp float32 + if isUpper { + atmp = a[k*lda+i] + } else { + atmp = a[i*lda+k] + } + atmp *= alpha + f32.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) + } + for k := i + 1; k < m; k++ { + var atmp float32 + if isUpper { + atmp = a[i*lda+k] + } else { + atmp = a[k*lda+i] + } + atmp *= alpha + f32.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) + } + } + return + } + if isUpper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + tmp := alpha * b[i*ldb+j] + var tmp2 float32 + atmp := a[j*lda+j+1 : j*lda+n] + btmp := b[i*ldb+j+1 : i*ldb+n] + ctmp := c[i*ldc+j+1 : i*ldc+n] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } + return + } + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + tmp := alpha * b[i*ldb+j] + var tmp2 float32 + atmp := a[j*lda : j*lda+j] + btmp := b[i*ldb : i*ldb+j] + ctmp := c[i*ldc : i*ldc+j] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } +} + +// Ssyrk performs one of the symmetric rank-k operations +// C = alpha * A * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A is 
an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and +// beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + row, col := k, n + if tA == blas.NoTrans { + row, col = n, k + } + if lda < max(1, col) { + panic(badLdA) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(row-1)+col { + panic(shortA) + } + if len(c) < ldc*(n-1)+n { + panic(shortC) + } + + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + atmp := a[i*lda : i*lda+k] + if beta == 0 { + for jc := range ctmp { + j := jc + i + ctmp[jc] = alpha * f32.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } else { + for jc, vc := range ctmp { + j := jc + i + ctmp[jc] = vc*beta + alpha*f32.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + atmp := a[i*lda : i*lda+k] + if beta == 0 { + for j := range ctmp { + ctmp[j] = alpha * f32.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } else { + for j, vc := range ctmp { + ctmp[j] = vc*beta + alpha*f32.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta == 0 { + for j := range ctmp { + ctmp[j] = 0 + } + } else if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f32.AxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f32.AxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp) + } + } + } +} + +// Ssyr2k performs one of the symmetric rank 2k operations +// C = alpha * A * B^T + alpha * B * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * B + alpha * B^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and +// alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Ssyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + row, col := k, n + if tA == blas.NoTrans { + row, col = n, k + } + if lda < max(1, col) { + panic(badLdA) + } + if ldb < max(1, col) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(row-1)+col { + panic(shortA) + } + if len(b) < ldb*(row-1)+col { + panic(shortB) + } + if len(c) < ldc*(n-1)+n { + panic(shortC) + } + + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc+i : i*ldc+n] + for jc := range ctmp { + j := i + jc + var tmp1, tmp2 float32 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[jc] *= beta + ctmp[jc] += alpha * (tmp1 + tmp2) + } + } + return + } + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc : i*ldc+i+1] + for j := 0; j <= i; j++ { + var tmp1, tmp2 float32 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[j] *= beta + ctmp[j] += alpha * (tmp1 + tmp2) + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb+i : l*ldb+n] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda+i : l*lda+n] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb : l*ldb+i+1] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda : l*lda+i+1] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } +} + +// Strmm performs one of the matrix-matrix operations +// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left +// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right +// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Strmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + f32.ScalUnitary(tmp, btmp) + for ka, va := range a[i*lda+i+1 : i*lda+m] { + k := ka + i + 1 + if va != 0 { + f32.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + for i := m - 1; i >= 0; i-- { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + f32.ScalUnitary(tmp, btmp) + for k, va := range a[i*lda : i*lda+i] { + if va != 0 { + f32.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + for ia, va := range a[k*lda+k+1 : k*lda+m] { + i := ia + k + 1 + btmp := b[i*ldb : i*ldb+n] + if va != 0 { + f32.AxpyUnitary(alpha*va, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + f32.ScalUnitary(tmp, btmpk) + } + } + return + } + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + for i, va := range a[k*lda : k*lda+k] { + btmp := b[i*ldb : i*ldb+n] + if va != 0 { + f32.AxpyUnitary(alpha*va, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + f32.ScalUnitary(tmp, btmpk) + } + } + return + } + // Cases where a is on the right + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + tmp := alpha * btmp[k] + if tmp == 0 { + continue + } + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f32.AxpyUnitary(tmp, a[k*lda+k+1:k*lda+n], btmp[k+1:n]) + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + tmp := alpha * btmp[k] + if tmp == 0 { + continue + } + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f32.AxpyUnitary(tmp, a[k*lda:k*lda+k], btmp[:k]) + } + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j, vb := range btmp { + tmp := vb + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) + btmp[j] = alpha * tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := btmp[j] + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f32.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) + btmp[j] = alpha * tmp + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go new file mode 100644 index 00000000..9eebd906 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go @@ -0,0 +1,864 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level3 = Implementation{} + +// Dtrsm solves one of the matrix equations +// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left +// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right +// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a +// scalar. +// +// At entry to the function, X contains the values of B, and the result is +// stored in-place into X. +// +// No check is made that A is invertible. +func (Implementation) Dtrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := m - 1; i >= 0; i-- { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f64.ScalUnitary(alpha, btmp) + } + for ka, va := range a[i*lda+i+1 : i*lda+m] { + if va != 0 { + k := ka + i + 1 + f64.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + f64.ScalUnitary(tmp, btmp) + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f64.ScalUnitary(alpha, btmp) + } + for k, va := range a[i*lda : i*lda+i] { + if va != 0 { + f64.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + f64.ScalUnitary(tmp, btmp) + } + } + return + } + // Cases where a is transposed + if ul == blas.Upper { + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + f64.ScalUnitary(tmp, btmpk) + } + for ia, va := range a[k*lda+k+1 : k*lda+m] { + if va != 0 { + i := ia + k + 1 + f64.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) + } + } + if alpha != 1 { + f64.ScalUnitary(alpha, btmpk) + } + } + return + } + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + f64.ScalUnitary(tmp, btmpk) + } + for i, va := range a[k*lda : k*lda+k] { + if va != 0 { + f64.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) + } + } + if alpha != 1 { + f64.ScalUnitary(alpha, btmpk) + } + } + return + } + // Cases where a is to the right of X. + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f64.ScalUnitary(alpha, btmp) + } + for k, vb := range btmp { + if vb == 0 { + continue + } + if nonUnit { + btmp[k] /= a[k*lda+k] + } + f64.AxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmp[k+1:n]) + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + f64.ScalUnitary(alpha, btmp) + } + for k := n - 1; k >= 0; k-- { + if btmp[k] == 0 { + continue + } + if nonUnit { + btmp[k] /= a[k*lda+k] + } + f64.AxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp[:k]) + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := 0; j < n; j++ { + tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } +} + +// Dsymm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C if side == blas.Left +// C = alpha * B * A + beta * C if side == blas.Right +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha +// is a scalar. 
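+//
+// A minimal illustrative call (impl and the matrix arguments are assumed for
+// exposition, not taken from this package): with side == blas.Left and A
+// stored in its upper triangle,
+//	impl.Dsymm(blas.Left, blas.Upper, m, n, alpha, a, lda, b, ldb, beta, c, ldc)
+// overwrites C with alpha*A*B + beta*C, where impl is an Implementation value.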
+func (Implementation) Dsymm(s blas.Side, ul blas.Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + if s != blas.Right && s != blas.Left { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] *= beta + } + } + return + } + + isUpper := ul == blas.Upper + if s == blas.Left { + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + btmp := b[i*ldb : i*ldb+n] + ctmp := c[i*ldc : i*ldc+n] + for j, v := range btmp { + ctmp[j] *= beta + ctmp[j] += atmp * v + } + + for k := 0; k < i; k++ { + var atmp float64 + if isUpper { + atmp = a[k*lda+i] + } else { + atmp = a[i*lda+k] + } + atmp *= alpha + f64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) + } + for k := i + 1; k < m; k++ { + var atmp float64 + if isUpper { + atmp = a[i*lda+k] + } else { + atmp = a[k*lda+i] + } + atmp *= alpha + f64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) + } + } + return + } + if isUpper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + tmp := alpha * b[i*ldb+j] + var tmp2 float64 + atmp := a[j*lda+j+1 : j*lda+n] + btmp := b[i*ldb+j+1 : i*ldb+n] + ctmp := c[i*ldc+j+1 : i*ldc+n] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } + return + } + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + tmp := alpha * b[i*ldb+j] + var tmp2 float64 + atmp := a[j*lda : j*lda+j] + btmp := b[i*ldb : i*ldb+j] + ctmp := c[i*ldc : i*ldc+j] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } +} + +// Dsyrk performs one of the symmetric rank-k operations +// C = alpha * A * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and +// beta are scalars. +func (Implementation) Dsyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + row, col := k, n + if tA == blas.NoTrans { + row, col = n, k + } + if lda < max(1, col) { + panic(badLdA) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(row-1)+col { + panic(shortA) + } + if len(c) < ldc*(n-1)+n { + panic(shortC) + } + + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + atmp := a[i*lda : i*lda+k] + if beta == 0 { + for jc := range ctmp { + j := jc + i + ctmp[jc] = alpha * f64.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } else { + for jc, vc := range ctmp { + j := jc + i + ctmp[jc] = vc*beta + alpha*f64.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + atmp := a[i*lda : i*lda+k] + if beta == 0 { + for j := range ctmp { + ctmp[j] = alpha * f64.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } else { + for j, vc := range ctmp { + ctmp[j] = vc*beta + alpha*f64.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta == 0 { + for j := range ctmp { + ctmp[j] = 0 + } + } else if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f64.AxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f64.AxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp) + } + } + } +} + +// Dsyr2k performs one of the symmetric rank 2k operations +// C = alpha * A * B^T + alpha * B * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * B + alpha * B^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and +// alpha and beta are scalars. +func (Implementation) Dsyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + row, col := k, n + if tA == blas.NoTrans { + row, col = n, k + } + if lda < max(1, col) { + panic(badLdA) + } + if ldb < max(1, col) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(row-1)+col { + panic(shortA) + } + if len(b) < ldb*(row-1)+col { + panic(shortB) + } + if len(c) < ldc*(n-1)+n { + panic(shortC) + } + + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc+i : i*ldc+n] + for jc := range ctmp { + j := i + jc + var tmp1, tmp2 float64 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[jc] *= beta + ctmp[jc] += alpha * (tmp1 + tmp2) + } + } + return + } + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc : i*ldc+i+1] + for j := 0; j <= i; j++ { + var tmp1, tmp2 float64 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[j] *= beta + ctmp[j] += alpha * (tmp1 + tmp2) + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb+i : l*ldb+n] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda+i : l*lda+n] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb : l*ldb+i+1] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda : l*lda+i+1] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } +} + +// Dtrmm performs one of the matrix-matrix operations +// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left +// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right +// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. +func (Implementation) Dtrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + f64.ScalUnitary(tmp, btmp) + for ka, va := range a[i*lda+i+1 : i*lda+m] { + k := ka + i + 1 + if va != 0 { + f64.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + for i := m - 1; i >= 0; i-- { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + f64.ScalUnitary(tmp, btmp) + for k, va := range a[i*lda : i*lda+i] { + if va != 0 { + f64.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + for ia, va := range a[k*lda+k+1 : k*lda+m] { + i := ia + k + 1 + btmp := b[i*ldb : i*ldb+n] + if va != 0 { + f64.AxpyUnitary(alpha*va, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + f64.ScalUnitary(tmp, btmpk) + } + } + return + } + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + for i, va := range a[k*lda : k*lda+k] { + btmp := b[i*ldb : i*ldb+n] + if va != 0 { + f64.AxpyUnitary(alpha*va, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + f64.ScalUnitary(tmp, btmpk) + } + } + return + } + // Cases where a is on the right + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + tmp := alpha * btmp[k] + if tmp == 0 { + continue + } + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f64.AxpyUnitary(tmp, a[k*lda+k+1:k*lda+n], btmp[k+1:n]) + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + tmp := alpha * btmp[k] + if tmp == 0 { + continue + } + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f64.AxpyUnitary(tmp, a[k*lda:k*lda+k], btmp[:k]) + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j, vb := range btmp { + tmp := vb + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) + btmp[j] = alpha * tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := btmp[j] + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f64.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) + btmp[j] = alpha * tmp + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go b/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go new file mode 100644 index 00000000..e868a105 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go @@ -0,0 +1,318 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "runtime" + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sgemm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C +// C = alpha * A^T * B + beta * C +// C = alpha * A * B^T + beta * C +// C = alpha * A^T * B^T + beta * C +// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is +// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or +// B are transposed. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sgemm(tA, tB blas.Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + aTrans := tA == blas.Trans || tA == blas.ConjTrans + if aTrans { + if lda < max(1, m) { + panic(badLdA) + } + } else { + if lda < max(1, k) { + panic(badLdA) + } + } + bTrans := tB == blas.Trans || tB == blas.ConjTrans + if bTrans { + if ldb < max(1, k) { + panic(badLdB) + } + } else { + if ldb < max(1, n) { + panic(badLdB) + } + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if aTrans { + if len(a) < (k-1)*lda+m { + panic(shortA) + } + } else { + if len(a) < (m-1)*lda+k { + panic(shortA) + } + } + if bTrans { + if len(b) < (n-1)*ldb+k { + panic(shortB) + } + } else { + if len(b) < (k-1)*ldb+n { + panic(shortB) + } + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + // scale c + if beta != 1 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + } + } + + sgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) +} + +func sgemmParallel(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // dgemmParallel computes a parallel matrix multiplication by partitioning + // a and b into sub-blocks, and updating c with the multiplication of the sub-block + // In all cases, + // A = [ A_11 A_12 ... A_1j + // A_21 A_22 ... A_2j + // ... + // A_i1 A_i2 ... A_ij] + // + // and same for B. All of the submatrix sizes are blockSize×blockSize except + // at the edges. + // + // In all cases, there is one dimension for each matrix along which + // C must be updated sequentially. + // Cij = \sum_k Aik Bki, (A * B) + // Cij = \sum_k Aki Bkj, (A^T * B) + // Cij = \sum_k Aik Bjk, (A * B^T) + // Cij = \sum_k Aki Bjk, (A^T * B^T) + // + // This code computes one {i, j} block sequentially along the k dimension, + // and computes all of the {i, j} blocks concurrently. This + // partitioning allows Cij to be updated in-place without race-conditions. + // Instead of launching a goroutine for each possible concurrent computation, + // a number of worker goroutines are created and channels are used to pass + // available and completed cases. 
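+	//
+	// As a concrete illustration (the sizes here are made up for exposition):
+	// if blockSize were 64, multiplying a 256×256 A by a 256×256 B would give
+	// a 4×4 grid of 64×64 C blocks; each worker receives {i, j} blocks from
+	// the channel and accumulates the products A_ik*B_kj over the four k
+	// panels in sequence.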
+ // + // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix + // multiplies, though this code does not copy matrices to attempt to eliminate + // cache misses. + + maxKLen := k + parBlocks := blocks(m, blockSize) * blocks(n, blockSize) + if parBlocks < minParBlock { + // The matrix multiplication is small in the dimensions where it can be + // computed concurrently. Just do it in serial. + sgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + } + + nWorkers := runtime.GOMAXPROCS(0) + if parBlocks < nWorkers { + nWorkers = parBlocks + } + // There is a tradeoff between the workers having to wait for work + // and a large buffer making operations slow. + buf := buffMul * nWorkers + if buf > parBlocks { + buf = parBlocks + } + + sendChan := make(chan subMul, buf) + + // Launch workers. A worker receives an {i, j} submatrix of c, and computes + // A_ik B_ki (or the transposed version) storing the result in c_ij. When the + // channel is finally closed, it signals to the waitgroup that it has finished + // computing. + var wg sync.WaitGroup + for i := 0; i < nWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for sub := range sendChan { + i := sub.i + j := sub.j + leni := blockSize + if i+leni > m { + leni = m - i + } + lenj := blockSize + if j+lenj > n { + lenj = n - j + } + + cSub := sliceView32(c, ldc, i, j, leni, lenj) + + // Compute A_ik B_kj for all k + for k := 0; k < maxKLen; k += blockSize { + lenk := blockSize + if k+lenk > maxKLen { + lenk = maxKLen - k + } + var aSub, bSub []float32 + if aTrans { + aSub = sliceView32(a, lda, k, i, lenk, leni) + } else { + aSub = sliceView32(a, lda, i, k, leni, lenk) + } + if bTrans { + bSub = sliceView32(b, ldb, j, k, lenj, lenk) + } else { + bSub = sliceView32(b, ldb, k, j, lenk, lenj) + } + sgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) + } + } + }() + } + + // Send out all of the {i, j} subblocks for computation. + for i := 0; i < m; i += blockSize { + for j := 0; j < n; j += blockSize { + sendChan <- subMul{ + i: i, + j: j, + } + } + } + close(sendChan) + wg.Wait() +} + +// sgemmSerial is serial matrix multiply +func sgemmSerial(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + switch { + case !aTrans && !bTrans: + sgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && !bTrans: + sgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case !aTrans && bTrans: + sgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && bTrans: + sgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + default: + panic("unreachable") + } +} + +// sgemmSerial where neither a nor b are transposed +func sgemmSerialNotNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. 
+ for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for l, v := range a[i*lda : i*lda+k] { + tmp := alpha * v + if tmp != 0 { + f32.AxpyUnitary(tmp, b[l*ldb:l*ldb+n], ctmp) + } + } + } +} + +// sgemmSerial where neither a is transposed and b is not +func sgemmSerialTransNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + btmp := b[l*ldb : l*ldb+n] + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyUnitary(tmp, btmp, ctmp) + } + } + } +} + +// sgemmSerial where neither a is not transposed and b is +func sgemmSerialNotTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + atmp := a[i*lda : i*lda+k] + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] += alpha * f32.DotUnitary(atmp, b[j*ldb:j*ldb+k]) + } + } +} + +// sgemmSerial where both are transposed +func sgemmSerialTransTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) + } + } + } +} + +func sliceView32(a []float32, lda, i, j, r, c int) []float32 { + return a[i*lda+j : (i+r-1)*lda+j+c] +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash b/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash new file mode 100644 index 00000000..53db63a7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash @@ -0,0 +1,218 @@ +#!/usr/bin/env bash + +# Copyright ©2015 The Gonum Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +WARNINGF32='//\ +// Float32 implementations are autogenerated and not directly tested.\ +' +WARNINGC64='//\ +// Complex64 implementations are autogenerated and not directly tested.\ +' + +# Level1 routines. 
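+# Each pipeline below derives a single-precision source file from its float64
+# counterpart: gofmt -r applies type and identifier rewrite rules, and the
+# trailing sed expressions rename the routines and swap the asm import path.
+# A minimal sketch of the idea (illustrative only, not one of the generated
+# files) would be:
+#   cat level1float64.go | gofmt -r 'float64 -> float32' | sed -e 's_^// D_// S_' > /tmp/sketch.go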
+ +echo Generating level1float32.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32.go +cat level1float64.go \ +| gofmt -r 'blas.Float64Level1 -> blas.Float32Level1' \ +\ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'blas.DrotmParams -> blas.SrotmParams' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.ScalInc -> f32.ScalInc' \ +| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e "s_^\(func (Implementation) \)Id\(.*\)\$_$WARNINGF32\1Is\2_" \ + -e 's_^// Id_// Is_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +>> level1float32.go + +echo Generating level1cmplx64.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1cmplx64.go +cat level1cmplx128.go \ +| gofmt -r 'blas.Complex128Level1 -> blas.Complex64Level1' \ +\ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'complex128 -> complex64' \ +\ +| gofmt -r 'c128.AxpyInc -> c64.AxpyInc' \ +| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \ +| gofmt -r 'c128.DotcInc -> c64.DotcInc' \ +| gofmt -r 'c128.DotcUnitary -> c64.DotcUnitary' \ +| gofmt -r 'c128.DotuInc -> c64.DotuInc' \ +| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \ +| gofmt -r 'c128.ScalInc -> c64.ScalInc' \ +| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \ +| gofmt -r 'dcabs1 -> scabs1' \ +\ +| sed -e "s_^\(func (Implementation) \)Zdot\(.*\)\$_$WARNINGC64\1Cdot\2_" \ + -e 's_^// Zdot_// Cdot_' \ + -e "s_^\(func (Implementation) \)Zdscal\(.*\)\$_$WARNINGC64\1Csscal\2_" \ + -e 's_^// Zdscal_// Csscal_' \ + -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \ + -e 's_^// Z_// C_' \ + -e "s_^\(func (Implementation) \)Iz\(.*\)\$_$WARNINGC64\1Ic\2_" \ + -e 's_^// Iz_// Ic_' \ + -e "s_^\(func (Implementation) \)Dz\(.*\)\$_$WARNINGC64\1Sc\2_" \ + -e 's_^// Dz_// Sc_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +>> level1cmplx64.go + +echo Generating level1float32_sdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32_sdot.go +cat level1float64_ddot.go \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.DotInc -> f32.DotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1float32_sdot.go + +echo Generating level1float32_dsdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32_dsdot.go +cat level1float64_ddot.go \ +| gofmt -r '[]float64 -> []float32' \ +\ +| gofmt -r 'f64.DotInc -> f32.DdotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DdotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1Ds\2_" \ + -e 's_^// D_// Ds_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1float32_dsdot.go + +echo Generating level1float32_sdsdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32_sdsdot.go +cat 
level1float64_ddot.go \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.DotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)) -> alpha + float32(f32.DdotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)))' \ +| gofmt -r 'f64.DotUnitary(a, b) -> alpha + float32(f32.DdotUnitary(a, b))' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1Sds\2_" \ + -e 's_^// D\(.*\)$_// Sds\1 plus a constant_' \ + -e 's_\\sum_alpha + \\sum_' \ + -e 's/n int/n int, alpha float32/' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1float32_sdsdot.go + + +# Level2 routines. + +echo Generating level2float32.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level2float32.go +cat level2float64.go \ +| gofmt -r 'blas.Float64Level2 -> blas.Float32Level2' \ +\ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyIncTo -> f32.AxpyIncTo' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.DotInc -> f32.DotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.ScalInc -> f32.ScalInc' \ +| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \ +| gofmt -r 'f64.Ger -> f32.Ger' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level2float32.go + +echo Generating level2cmplx64.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level2cmplx64.go +cat level2cmplx128.go \ +| gofmt -r 'blas.Complex128Level2 -> blas.Complex64Level2' \ +\ +| gofmt -r 'complex128 -> complex64' \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'c128.AxpyInc -> c64.AxpyInc' \ +| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \ +| gofmt -r 'c128.DotuInc -> c64.DotuInc' \ +| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \ +| gofmt -r 'c128.ScalInc -> c64.ScalInc' \ +| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \ + -e 's_^// Z_// C_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \ + -e 's_"math/cmplx"_cmplx "gonum.org/v1/gonum/internal/cmplx64"_' \ +>> level2cmplx64.go + +# Level3 routines. 
+ +echo Generating level3float32.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level3float32.go +cat level3float64.go \ +| gofmt -r 'blas.Float64Level3 -> blas.Float32Level3' \ +\ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level3float32.go + +echo Generating sgemm.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > sgemm.go +cat dgemm.go \ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'sliceView64 -> sliceView32' \ +\ +| gofmt -r 'dgemmParallel -> sgemmParallel' \ +| gofmt -r 'computeNumBlocks64 -> computeNumBlocks32' \ +| gofmt -r 'dgemmSerial -> sgemmSerial' \ +| gofmt -r 'dgemmSerialNotNot -> sgemmSerialNotNot' \ +| gofmt -r 'dgemmSerialTransNot -> sgemmSerialTransNot' \ +| gofmt -r 'dgemmSerialNotTrans -> sgemmSerialNotTrans' \ +| gofmt -r 'dgemmSerialTransTrans -> sgemmSerialTransTrans' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_^// d_// s_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> sgemm.go + +echo Generating level3cmplx64.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level3cmplx64.go +cat level3cmplx128.go \ +| gofmt -r 'blas.Complex128Level3 -> blas.Complex64Level3' \ +\ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'complex128 -> complex64' \ +\ +| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \ +| gofmt -r 'c128.DscalUnitary -> c64.SscalUnitary' \ +| gofmt -r 'c128.DotcUnitary -> c64.DotcUnitary' \ +| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \ +| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \ + -e 's_^// Z_// C_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \ + -e 's_"math/cmplx"_cmplx "gonum.org/v1/gonum/internal/cmplx64"_' \ +>> level3cmplx64.go diff --git a/vendor/gonum.org/v1/gonum/floats/README.md b/vendor/gonum.org/v1/gonum/floats/README.md new file mode 100644 index 00000000..ee867bb7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/README.md @@ -0,0 +1,4 @@ +# Gonum floats [![GoDoc](https://godoc.org/gonum.org/v1/gonum/floats?status.svg)](https://godoc.org/gonum.org/v1/gonum/floats) + +Package floats provides a set of helper routines for dealing with slices of float64. +The functions avoid allocations to allow for use within tight loops without garbage collection overhead. diff --git a/vendor/gonum.org/v1/gonum/floats/doc.go b/vendor/gonum.org/v1/gonum/floats/doc.go new file mode 100644 index 00000000..bfe05c19 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/doc.go @@ -0,0 +1,11 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package floats provides a set of helper routines for dealing with slices +// of float64. 
The functions avoid allocations to allow for use within tight +// loops without garbage collection overhead. +// +// The convention used is that when a slice is being modified in place, it has +// the name dst. +package floats // import "gonum.org/v1/gonum/floats" diff --git a/vendor/gonum.org/v1/gonum/floats/floats.go b/vendor/gonum.org/v1/gonum/floats/floats.go new file mode 100644 index 00000000..ae004a62 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/floats.go @@ -0,0 +1,933 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this code is governed by a BSD-style +// license that can be found in the LICENSE file + +package floats + +import ( + "errors" + "math" + "sort" + "strconv" + + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Add adds, element-wise, the elements of s and dst, and stores in dst. +// Panics if the lengths of dst and s do not match. +func Add(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: length of the slices do not match") + } + f64.AxpyUnitaryTo(dst, 1, s, dst) +} + +// AddTo adds, element-wise, the elements of s and t and +// stores the result in dst. Panics if the lengths of s, t and dst do not match. +func AddTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic("floats: length of adders do not match") + } + if len(dst) != len(s) { + panic("floats: length of destination does not match length of adder") + } + f64.AxpyUnitaryTo(dst, 1, s, t) + return dst +} + +// AddConst adds the scalar c to all of the values in dst. +func AddConst(c float64, dst []float64) { + f64.AddConst(c, dst) +} + +// AddScaled performs dst = dst + alpha * s. +// It panics if the lengths of dst and s are not equal. +func AddScaled(dst []float64, alpha float64, s []float64) { + if len(dst) != len(s) { + panic("floats: length of destination and source to not match") + } + f64.AxpyUnitaryTo(dst, alpha, s, dst) +} + +// AddScaledTo performs dst = y + alpha * s, where alpha is a scalar, +// and dst, y and s are all slices. +// It panics if the lengths of dst, y, and s are not equal. +// +// At the return of the function, dst[i] = y[i] + alpha * s[i] +func AddScaledTo(dst, y []float64, alpha float64, s []float64) []float64 { + if len(dst) != len(s) || len(dst) != len(y) { + panic("floats: lengths of slices do not match") + } + f64.AxpyUnitaryTo(dst, alpha, s, y) + return dst +} + +// argsort is a helper that implements sort.Interface, as used by +// Argsort. +type argsort struct { + s []float64 + inds []int +} + +func (a argsort) Len() int { + return len(a.s) +} + +func (a argsort) Less(i, j int) bool { + return a.s[i] < a.s[j] +} + +func (a argsort) Swap(i, j int) { + a.s[i], a.s[j] = a.s[j], a.s[i] + a.inds[i], a.inds[j] = a.inds[j], a.inds[i] +} + +// Argsort sorts the elements of dst while tracking their original order. +// At the conclusion of Argsort, dst will contain the original elements of dst +// but sorted in increasing order, and inds will contain the original position +// of the elements in the slice such that dst[i] = origDst[inds[i]]. +// It panics if the lengths of dst and inds do not match. +func Argsort(dst []float64, inds []int) { + if len(dst) != len(inds) { + panic("floats: length of inds does not match length of slice") + } + for i := range dst { + inds[i] = i + } + + a := argsort{s: dst, inds: inds} + sort.Sort(a) +} + +// Count applies the function f to every element of s and returns the number +// of times the function returned true. 
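+//
+// For example (illustrative only), Count(func(v float64) bool { return v > 0 }, s)
+// reports how many entries of s are strictly positive.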
+func Count(f func(float64) bool, s []float64) int { + var n int + for _, val := range s { + if f(val) { + n++ + } + } + return n +} + +// CumProd finds the cumulative product of the first i elements in +// s and puts them in place into the ith element of the +// destination dst. A panic will occur if the lengths of arguments +// do not match. +// +// At the return of the function, dst[i] = s[i] * s[i-1] * s[i-2] * ... +func CumProd(dst, s []float64) []float64 { + if len(dst) != len(s) { + panic("floats: length of destination does not match length of the source") + } + if len(dst) == 0 { + return dst + } + return f64.CumProd(dst, s) +} + +// CumSum finds the cumulative sum of the first i elements in +// s and puts them in place into the ith element of the +// destination dst. A panic will occur if the lengths of arguments +// do not match. +// +// At the return of the function, dst[i] = s[i] + s[i-1] + s[i-2] + ... +func CumSum(dst, s []float64) []float64 { + if len(dst) != len(s) { + panic("floats: length of destination does not match length of the source") + } + if len(dst) == 0 { + return dst + } + return f64.CumSum(dst, s) +} + +// Distance computes the L-norm of s - t. See Norm for special cases. +// A panic will occur if the lengths of s and t do not match. +func Distance(s, t []float64, L float64) float64 { + if len(s) != len(t) { + panic("floats: slice lengths do not match") + } + if len(s) == 0 { + return 0 + } + var norm float64 + if L == 2 { + for i, v := range s { + diff := t[i] - v + norm = math.Hypot(norm, diff) + } + return norm + } + if L == 1 { + for i, v := range s { + norm += math.Abs(t[i] - v) + } + return norm + } + if math.IsInf(L, 1) { + for i, v := range s { + absDiff := math.Abs(t[i] - v) + if absDiff > norm { + norm = absDiff + } + } + return norm + } + for i, v := range s { + norm += math.Pow(math.Abs(t[i]-v), L) + } + return math.Pow(norm, 1/L) +} + +// Div performs element-wise division dst / s +// and stores the value in dst. It panics if the +// lengths of s and t are not equal. +func Div(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: slice lengths do not match") + } + f64.Div(dst, s) +} + +// DivTo performs element-wise division s / t +// and stores the value in dst. It panics if the +// lengths of s, t, and dst are not equal. +func DivTo(dst, s, t []float64) []float64 { + if len(s) != len(t) || len(dst) != len(t) { + panic("floats: slice lengths do not match") + } + return f64.DivTo(dst, s, t) +} + +// Dot computes the dot product of s1 and s2, i.e. +// sum_{i = 1}^N s1[i]*s2[i]. +// A panic will occur if lengths of arguments do not match. +func Dot(s1, s2 []float64) float64 { + if len(s1) != len(s2) { + panic("floats: lengths of the slices do not match") + } + return f64.DotUnitary(s1, s2) +} + +// Equal returns true if the slices have equal lengths and +// all elements are numerically identical. +func Equal(s1, s2 []float64) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if s2[i] != val { + return false + } + } + return true +} + +// EqualApprox returns true if the slices have equal lengths and +// all element pairs have an absolute tolerance less than tol or a +// relative tolerance less than tol. 
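+//
+// For example (illustrative only), EqualApprox([]float64{1, 2}, []float64{1, 2 + 1e-10}, 1e-8)
+// is true, while the same slices compared with tol = 1e-12 are not.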
+func EqualApprox(s1, s2 []float64, tol float64) bool { + if len(s1) != len(s2) { + return false + } + for i, a := range s1 { + if !EqualWithinAbsOrRel(a, s2[i], tol, tol) { + return false + } + } + return true +} + +// EqualFunc returns true if the slices have the same lengths +// and the function returns true for all element pairs. +func EqualFunc(s1, s2 []float64, f func(float64, float64) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if !f(val, s2[i]) { + return false + } + } + return true +} + +// EqualWithinAbs returns true if a and b have an absolute +// difference of less than tol. +func EqualWithinAbs(a, b, tol float64) bool { + return a == b || math.Abs(a-b) <= tol +} + +const minNormalFloat64 = 2.2250738585072014e-308 + +// EqualWithinRel returns true if the difference between a and b +// is not greater than tol times the greater value. +func EqualWithinRel(a, b, tol float64) bool { + if a == b { + return true + } + delta := math.Abs(a - b) + if delta <= minNormalFloat64 { + return delta <= tol*minNormalFloat64 + } + // We depend on the division in this relationship to identify + // infinities (we rely on the NaN to fail the test) otherwise + // we compare Infs of the same sign and evaluate Infs as equal + // independent of sign. + return delta/math.Max(math.Abs(a), math.Abs(b)) <= tol +} + +// EqualWithinAbsOrRel returns true if a and b are equal to within +// the absolute tolerance. +func EqualWithinAbsOrRel(a, b, absTol, relTol float64) bool { + if EqualWithinAbs(a, b, absTol) { + return true + } + return EqualWithinRel(a, b, relTol) +} + +// EqualWithinULP returns true if a and b are equal to within +// the specified number of floating point units in the last place. +func EqualWithinULP(a, b float64, ulp uint) bool { + if a == b { + return true + } + if math.IsNaN(a) || math.IsNaN(b) { + return false + } + if math.Signbit(a) != math.Signbit(b) { + return math.Float64bits(math.Abs(a))+math.Float64bits(math.Abs(b)) <= uint64(ulp) + } + return ulpDiff(math.Float64bits(a), math.Float64bits(b)) <= uint64(ulp) +} + +func ulpDiff(a, b uint64) uint64 { + if a > b { + return a - b + } + return b - a +} + +// EqualLengths returns true if all of the slices have equal length, +// and false otherwise. Returns true if there are no input slices. +func EqualLengths(slices ...[]float64) bool { + // This length check is needed: http://play.golang.org/p/sdty6YiLhM + if len(slices) == 0 { + return true + } + l := len(slices[0]) + for i := 1; i < len(slices); i++ { + if len(slices[i]) != l { + return false + } + } + return true +} + +// Find applies f to every element of s and returns the indices of the first +// k elements for which the f returns true, or all such elements +// if k < 0. +// Find will reslice inds to have 0 length, and will append +// found indices to inds. +// If k > 0 and there are fewer than k elements in s satisfying f, +// all of the found elements will be returned along with an error. +// At the return of the function, the input inds will be in an undetermined state. 
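+//
+// An illustrative call (the slice s is assumed):
+//	inds, err := Find(nil, func(v float64) bool { return v < 0 }, s, 2)
+// returns the indices of the first two negative entries of s, with a non-nil
+// err if fewer than two such entries exist.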
+func Find(inds []int, f func(float64) bool, s []float64, k int) ([]int, error) { + // inds is also returned to allow for calling with nil + + // Reslice inds to have zero length + inds = inds[:0] + + // If zero elements requested, can just return + if k == 0 { + return inds, nil + } + + // If k < 0, return all of the found indices + if k < 0 { + for i, val := range s { + if f(val) { + inds = append(inds, i) + } + } + return inds, nil + } + + // Otherwise, find the first k elements + nFound := 0 + for i, val := range s { + if f(val) { + inds = append(inds, i) + nFound++ + if nFound == k { + return inds, nil + } + } + } + // Finished iterating over the loop, which means k elements were not found + return inds, errors.New("floats: insufficient elements found") +} + +// HasNaN returns true if the slice s has any values that are NaN and false +// otherwise. +func HasNaN(s []float64) bool { + for _, v := range s { + if math.IsNaN(v) { + return true + } + } + return false +} + +// LogSpan returns a set of n equally spaced points in log space between, +// l and u where N is equal to len(dst). The first element of the +// resulting dst will be l and the final element of dst will be u. +// Panics if len(dst) < 2 +// Note that this call will return NaNs if either l or u are negative, and +// will return all zeros if l or u is zero. +// Also returns the mutated slice dst, so that it can be used in range, like: +// +// for i, x := range LogSpan(dst, l, u) { ... } +func LogSpan(dst []float64, l, u float64) []float64 { + Span(dst, math.Log(l), math.Log(u)) + for i := range dst { + dst[i] = math.Exp(dst[i]) + } + return dst +} + +// LogSumExp returns the log of the sum of the exponentials of the values in s. +// Panics if s is an empty slice. +func LogSumExp(s []float64) float64 { + // Want to do this in a numerically stable way which avoids + // overflow and underflow + // First, find the maximum value in the slice. + maxval := Max(s) + if math.IsInf(maxval, 0) { + // If it's infinity either way, the logsumexp will be infinity as well + // returning now avoids NaNs + return maxval + } + var lse float64 + // Compute the sumexp part + for _, val := range s { + lse += math.Exp(val - maxval) + } + // Take the log and add back on the constant taken out + return math.Log(lse) + maxval +} + +// Max returns the maximum value in the input slice. If the slice is empty, Max will panic. +func Max(s []float64) float64 { + return s[MaxIdx(s)] +} + +// MaxIdx returns the index of the maximum value in the input slice. If several +// entries have the maximum value, the first such index is returned. If the slice +// is empty, MaxIdx will panic. +func MaxIdx(s []float64) int { + if len(s) == 0 { + panic("floats: zero slice length") + } + max := math.NaN() + var ind int + for i, v := range s { + if math.IsNaN(v) { + continue + } + if v > max || math.IsNaN(max) { + max = v + ind = i + } + } + return ind +} + +// Min returns the maximum value in the input slice. If the slice is empty, Min will panic. +func Min(s []float64) float64 { + return s[MinIdx(s)] +} + +// MinIdx returns the index of the minimum value in the input slice. If several +// entries have the maximum value, the first such index is returned. If the slice +// is empty, MinIdx will panic. 
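+//
+// For example (illustrative only), MinIdx([]float64{3, math.NaN(), -1, -1}) returns 2:
+// the NaN is skipped and the first occurrence of the minimum wins.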
+func MinIdx(s []float64) int { + if len(s) == 0 { + panic("floats: zero slice length") + } + min := math.NaN() + var ind int + for i, v := range s { + if math.IsNaN(v) { + continue + } + if v < min || math.IsNaN(min) { + min = v + ind = i + } + } + return ind +} + +// Mul performs element-wise multiplication between dst +// and s and stores the value in dst. Panics if the +// lengths of s and t are not equal. +func Mul(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: slice lengths do not match") + } + for i, val := range s { + dst[i] *= val + } +} + +// MulTo performs element-wise multiplication between s +// and t and stores the value in dst. Panics if the +// lengths of s, t, and dst are not equal. +func MulTo(dst, s, t []float64) []float64 { + if len(s) != len(t) || len(dst) != len(t) { + panic("floats: slice lengths do not match") + } + for i, val := range t { + dst[i] = val * s[i] + } + return dst +} + +const ( + nanBits = 0x7ff8000000000000 + nanMask = 0xfff8000000000000 +) + +// NaNWith returns an IEEE 754 "quiet not-a-number" value with the +// payload specified in the low 51 bits of payload. +// The NaN returned by math.NaN has a bit pattern equal to NaNWith(1). +func NaNWith(payload uint64) float64 { + return math.Float64frombits(nanBits | (payload &^ nanMask)) +} + +// NaNPayload returns the lowest 51 bits payload of an IEEE 754 "quiet +// not-a-number". For values of f other than quiet-NaN, NaNPayload +// returns zero and false. +func NaNPayload(f float64) (payload uint64, ok bool) { + b := math.Float64bits(f) + if b&nanBits != nanBits { + return 0, false + } + return b &^ nanMask, true +} + +// NearestIdx returns the index of the element in s +// whose value is nearest to v. If several such +// elements exist, the lowest index is returned. +// NearestIdx panics if len(s) == 0. +func NearestIdx(s []float64, v float64) int { + if len(s) == 0 { + panic("floats: zero length slice") + } + switch { + case math.IsNaN(v): + return 0 + case math.IsInf(v, 1): + return MaxIdx(s) + case math.IsInf(v, -1): + return MinIdx(s) + } + var ind int + dist := math.NaN() + for i, val := range s { + newDist := math.Abs(v - val) + // A NaN distance will not be closer. + if math.IsNaN(newDist) { + continue + } + if newDist < dist || math.IsNaN(dist) { + dist = newDist + ind = i + } + } + return ind +} + +// NearestIdxForSpan return the index of a hypothetical vector created +// by Span with length n and bounds l and u whose value is closest +// to v. That is, NearestIdxForSpan(n, l, u, v) is equivalent to +// Nearest(Span(make([]float64, n),l,u),v) without an allocation. +// NearestIdxForSpan panics if n is less than two. +func NearestIdxForSpan(n int, l, u float64, v float64) int { + if n <= 1 { + panic("floats: span must have length >1") + } + if math.IsNaN(v) { + return 0 + } + + // Special cases for Inf and NaN. 
+ switch { + case math.IsNaN(l) && !math.IsNaN(u): + return n - 1 + case math.IsNaN(u): + return 0 + case math.IsInf(l, 0) && math.IsInf(u, 0): + if l == u { + return 0 + } + if n%2 == 1 { + if !math.IsInf(v, 0) { + return n / 2 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n/2 + 1 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n / 2 + case math.IsInf(l, 0): + if v == l { + return 0 + } + return n - 1 + case math.IsInf(u, 0): + if v == u { + return n - 1 + } + return 0 + case math.IsInf(v, -1): + if l <= u { + return 0 + } + return n - 1 + case math.IsInf(v, 1): + if u <= l { + return 0 + } + return n - 1 + } + + // Special cases for v outside (l, u) and (u, l). + switch { + case l < u: + if v <= l { + return 0 + } + if v >= u { + return n - 1 + } + case l > u: + if v >= l { + return 0 + } + if v <= u { + return n - 1 + } + default: + return 0 + } + + // Can't guarantee anything about exactly halfway between + // because of floating point weirdness. + return int((float64(n)-1)/(u-l)*(v-l) + 0.5) +} + +// Norm returns the L norm of the slice S, defined as +// (sum_{i=1}^N s[i]^L)^{1/L} +// Special cases: +// L = math.Inf(1) gives the maximum absolute value. +// Does not correctly compute the zero norm (use Count). +func Norm(s []float64, L float64) float64 { + // Should this complain if L is not positive? + // Should this be done in log space for better numerical stability? + // would be more cost + // maybe only if L is high? + if len(s) == 0 { + return 0 + } + if L == 2 { + twoNorm := math.Abs(s[0]) + for i := 1; i < len(s); i++ { + twoNorm = math.Hypot(twoNorm, s[i]) + } + return twoNorm + } + var norm float64 + if L == 1 { + for _, val := range s { + norm += math.Abs(val) + } + return norm + } + if math.IsInf(L, 1) { + for _, val := range s { + norm = math.Max(norm, math.Abs(val)) + } + return norm + } + for _, val := range s { + norm += math.Pow(math.Abs(val), L) + } + return math.Pow(norm, 1/L) +} + +// ParseWithNA converts the string s to a float64 in v. +// If s equals missing, w is returned as 0, otherwise 1. +func ParseWithNA(s, missing string) (v, w float64, err error) { + if s == missing { + return 0, 0, nil + } + v, err = strconv.ParseFloat(s, 64) + if err == nil { + w = 1 + } + return v, w, err +} + +// Prod returns the product of the elements of the slice. +// Returns 1 if len(s) = 0. +func Prod(s []float64) float64 { + prod := 1.0 + for _, val := range s { + prod *= val + } + return prod +} + +// Reverse reverses the order of elements in the slice. +func Reverse(s []float64) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +// Round returns the half away from zero rounded value of x with prec precision. +// +// Special cases are: +// Round(±0) = +0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. + if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + if x < 0 { + x = math.Ceil(intermed - 0.5) + } else { + x = math.Floor(intermed + 0.5) + } + + if x == 0 { + return 0 + } + + return x / pow +} + +// RoundEven returns the half even rounded value of x with prec precision. 
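+// For example (illustrative only), RoundEven(2.5, 0) == 2 and RoundEven(3.5, 0) == 4.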
+// +// Special cases are: +// RoundEven(±0) = +0 +// RoundEven(±Inf) = ±Inf +// RoundEven(NaN) = NaN +func RoundEven(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. + if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + if isHalfway(intermed) { + correction, _ := math.Modf(math.Mod(intermed, 2)) + intermed += correction + if intermed > 0 { + x = math.Floor(intermed) + } else { + x = math.Ceil(intermed) + } + } else { + if x < 0 { + x = math.Ceil(intermed - 0.5) + } else { + x = math.Floor(intermed + 0.5) + } + } + + if x == 0 { + return 0 + } + + return x / pow +} + +func isHalfway(x float64) bool { + _, frac := math.Modf(x) + frac = math.Abs(frac) + return frac == 0.5 || (math.Nextafter(frac, math.Inf(-1)) < 0.5 && math.Nextafter(frac, math.Inf(1)) > 0.5) +} + +// Same returns true if the input slices have the same length and the all elements +// have the same value with NaN treated as the same. +func Same(s, t []float64) bool { + if len(s) != len(t) { + return false + } + for i, v := range s { + w := t[i] + if v != w && !(math.IsNaN(v) && math.IsNaN(w)) { + return false + } + } + return true +} + +// Scale multiplies every element in dst by the scalar c. +func Scale(c float64, dst []float64) { + if len(dst) > 0 { + f64.ScalUnitary(c, dst) + } +} + +// ScaleTo multiplies the elements in s by c and stores the result in dst. +func ScaleTo(dst []float64, c float64, s []float64) []float64 { + if len(dst) != len(s) { + panic("floats: lengths of slices do not match") + } + if len(dst) > 0 { + f64.ScalUnitaryTo(dst, c, s) + } + return dst +} + +// Span returns a set of N equally spaced points between l and u, where N +// is equal to the length of the destination. The first element of the destination +// is l, the final element of the destination is u. +// +// Panics if len(dst) < 2. +// +// Span also returns the mutated slice dst, so that it can be used in range expressions, +// like: +// +// for i, x := range Span(dst, l, u) { ... } +func Span(dst []float64, l, u float64) []float64 { + n := len(dst) + if n < 2 { + panic("floats: destination must have length >1") + } + + // Special cases for Inf and NaN. + switch { + case math.IsNaN(l): + for i := range dst[:len(dst)-1] { + dst[i] = math.NaN() + } + dst[len(dst)-1] = u + return dst + case math.IsNaN(u): + for i := range dst[1:] { + dst[i+1] = math.NaN() + } + dst[0] = l + return dst + case math.IsInf(l, 0) && math.IsInf(u, 0): + for i := range dst[:len(dst)/2] { + dst[i] = l + dst[len(dst)-i-1] = u + } + if len(dst)%2 == 1 { + if l != u { + dst[len(dst)/2] = 0 + } else { + dst[len(dst)/2] = l + } + } + return dst + case math.IsInf(l, 0): + for i := range dst[:len(dst)-1] { + dst[i] = l + } + dst[len(dst)-1] = u + return dst + case math.IsInf(u, 0): + for i := range dst[1:] { + dst[i+1] = u + } + dst[0] = l + return dst + } + + step := (u - l) / float64(n-1) + for i := range dst { + dst[i] = l + step*float64(i) + } + return dst +} + +// Sub subtracts, element-wise, the elements of s from dst. Panics if +// the lengths of dst and s do not match. +func Sub(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: length of the slices do not match") + } + f64.AxpyUnitaryTo(dst, -1, s, dst) +} + +// SubTo subtracts, element-wise, the elements of t from s and +// stores the result in dst. 
Panics if the lengths of s, t and dst do not match. +func SubTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic("floats: length of subtractor and subtractee do not match") + } + if len(dst) != len(s) { + panic("floats: length of destination does not match length of subtractor") + } + f64.AxpyUnitaryTo(dst, -1, t, s) + return dst +} + +// Sum returns the sum of the elements of the slice. +func Sum(s []float64) float64 { + return f64.Sum(s) +} + +// Within returns the first index i where s[i] <= v < s[i+1]. Within panics if: +// - len(s) < 2 +// - s is not sorted +func Within(s []float64, v float64) int { + if len(s) < 2 { + panic("floats: slice length less than 2") + } + if !sort.Float64sAreSorted(s) { + panic("floats: input slice not sorted") + } + if v < s[0] || v >= s[len(s)-1] || math.IsNaN(v) { + return -1 + } + for i, f := range s[1:] { + if v < f { + return i + } + } + return -1 +} diff --git a/vendor/gonum.org/v1/gonum/graph/.gitignore b/vendor/gonum.org/v1/gonum/graph/.gitignore new file mode 100644 index 00000000..86e0d240 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/.gitignore @@ -0,0 +1 @@ +test.out \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/graph/README.md b/vendor/gonum.org/v1/gonum/graph/README.md new file mode 100644 index 00000000..f0a9505e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/README.md @@ -0,0 +1,3 @@ +# Gonum graph [![GoDoc](https://godoc.org/gonum.org/v1/gonum/graph?status.svg)](https://godoc.org/gonum.org/v1/gonum/graph) + +This is a generalized graph package for the Go language. diff --git a/vendor/gonum.org/v1/gonum/graph/doc.go b/vendor/gonum.org/v1/gonum/graph/doc.go new file mode 100644 index 00000000..7eedd09c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/doc.go @@ -0,0 +1,9 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package graph defines graph interfaces. +// +// Routines to test contract compliance by user implemented graph types +// are available in gonum.org/v1/gonum/graph/testgraph. +package graph // import "gonum.org/v1/gonum/graph" diff --git a/vendor/gonum.org/v1/gonum/graph/graph.go b/vendor/gonum.org/v1/gonum/graph/graph.go new file mode 100644 index 00000000..c973583d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/graph.go @@ -0,0 +1,282 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Node is a graph node. It returns a graph-unique integer ID. +type Node interface { + ID() int64 +} + +// Edge is a graph edge. In directed graphs, the direction of the +// edge is given from -> to, otherwise the edge is semantically +// unordered. +type Edge interface { + // From returns the from node of the edge. + From() Node + + // To returns the to node of the edge. + To() Node + + // ReversedEdge returns an edge that has + // the end points of the receiver swapped. + ReversedEdge() Edge +} + +// WeightedEdge is a weighted graph edge. In directed graphs, the direction +// of the edge is given from -> to, otherwise the edge is semantically +// unordered. +type WeightedEdge interface { + Edge + Weight() float64 +} + +// Graph is a generalized graph. +type Graph interface { + // Node returns the node with the given ID if it exists + // in the graph, and nil otherwise. 
+ Node(id int64) Node + + // Nodes returns all the nodes in the graph. + // + // Nodes must not return nil. + Nodes() Nodes + + // From returns all nodes that can be reached directly + // from the node with the given ID. + // + // From must not return nil. + From(id int64) Nodes + + // HasEdgeBetween returns whether an edge exists between + // nodes with IDs xid and yid without considering direction. + HasEdgeBetween(xid, yid int64) bool + + // Edge returns the edge from u to v, with IDs uid and vid, + // if such an edge exists and nil otherwise. The node v + // must be directly reachable from u as defined by the + // From method. + Edge(uid, vid int64) Edge +} + +// Weighted is a weighted graph. +type Weighted interface { + Graph + + // WeightedEdge returns the weighted edge from u to v + // with IDs uid and vid if such an edge exists and + // nil otherwise. The node v must be directly + // reachable from u as defined by the From method. + WeightedEdge(uid, vid int64) WeightedEdge + + // Weight returns the weight for the edge between + // x and y with IDs xid and yid if Edge(xid, yid) + // returns a non-nil Edge. + // If x and y are the same node or there is no + // joining edge between the two nodes the weight + // value returned is implementation dependent. + // Weight returns true if an edge exists between + // x and y or if x and y have the same ID, false + // otherwise. + Weight(xid, yid int64) (w float64, ok bool) +} + +// Undirected is an undirected graph. +type Undirected interface { + Graph + + // EdgeBetween returns the edge between nodes x and y + // with IDs xid and yid. + EdgeBetween(xid, yid int64) Edge +} + +// WeightedUndirected is a weighted undirected graph. +type WeightedUndirected interface { + Weighted + + // WeightedEdgeBetween returns the edge between nodes + // x and y with IDs xid and yid. + WeightedEdgeBetween(xid, yid int64) WeightedEdge +} + +// Directed is a directed graph. +type Directed interface { + Graph + + // HasEdgeFromTo returns whether an edge exists + // in the graph from u to v with IDs uid and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// WeightedDirected is a weighted directed graph. +type WeightedDirected interface { + Weighted + + // HasEdgeFromTo returns whether an edge exists + // in the graph from u to v with the IDs uid and + // vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// NodeAdder is an interface for adding arbitrary nodes to a graph. +type NodeAdder interface { + // NewNode returns a new Node with a unique + // arbitrary ID. + NewNode() Node + + // AddNode adds a node to the graph. AddNode panics if + // the added node ID matches an existing node ID. + AddNode(Node) +} + +// NodeRemover is an interface for removing nodes from a graph. +type NodeRemover interface { + // RemoveNode removes the node with the given ID + // from the graph, as well as any edges attached + // to it. If the node is not in the graph it is + // a no-op. + RemoveNode(id int64) +} + +// EdgeAdder is an interface for adding edges to a graph. +type EdgeAdder interface { + // NewEdge returns a new Edge from the source to the destination node. + NewEdge(from, to Node) Edge + + // SetEdge adds an edge from one node to another. 
+ // If the graph supports node addition the nodes + // will be added if they do not exist, otherwise + // SetEdge will panic. + // The behavior of an EdgeAdder when the IDs + // returned by e.From() and e.To() are equal is + // implementation-dependent. + // Whether e, e.From() and e.To() are stored + // within the graph is implementation dependent. + SetEdge(e Edge) +} + +// WeightedEdgeAdder is an interface for adding edges to a graph. +type WeightedEdgeAdder interface { + // NewWeightedEdge returns a new WeightedEdge from + // the source to the destination node. + NewWeightedEdge(from, to Node, weight float64) WeightedEdge + + // SetWeightedEdge adds an edge from one node to + // another. If the graph supports node addition + // the nodes will be added if they do not exist, + // otherwise SetWeightedEdge will panic. + // The behavior of a WeightedEdgeAdder when the IDs + // returned by e.From() and e.To() are equal is + // implementation-dependent. + // Whether e, e.From() and e.To() are stored + // within the graph is implementation dependent. + SetWeightedEdge(e WeightedEdge) +} + +// EdgeRemover is an interface for removing nodes from a graph. +type EdgeRemover interface { + // RemoveEdge removes the edge with the given end + // IDs, leaving the terminal nodes. If the edge + // does not exist it is a no-op. + RemoveEdge(fid, tid int64) +} + +// Builder is a graph that can have nodes and edges added. +type Builder interface { + NodeAdder + EdgeAdder +} + +// WeightedBuilder is a graph that can have nodes and weighted edges added. +type WeightedBuilder interface { + NodeAdder + WeightedEdgeAdder +} + +// UndirectedBuilder is an undirected graph builder. +type UndirectedBuilder interface { + Undirected + Builder +} + +// UndirectedWeightedBuilder is an undirected weighted graph builder. +type UndirectedWeightedBuilder interface { + Undirected + WeightedBuilder +} + +// DirectedBuilder is a directed graph builder. +type DirectedBuilder interface { + Directed + Builder +} + +// DirectedWeightedBuilder is a directed weighted graph builder. +type DirectedWeightedBuilder interface { + Directed + WeightedBuilder +} + +// Copy copies nodes and edges as undirected edges from the source to the destination +// without first clearing the destination. Copy will panic if a node ID in the source +// graph matches a node ID in the destination. +// +// If the source is undirected and the destination is directed both directions will +// be present in the destination after the copy is complete. +func Copy(dst Builder, src Graph) { + nodes := src.Nodes() + for nodes.Next() { + dst.AddNode(nodes.Node()) + } + nodes.Reset() + for nodes.Next() { + u := nodes.Node() + uid := u.ID() + to := src.From(uid) + for to.Next() { + v := to.Node() + dst.SetEdge(src.Edge(uid, v.ID())) + } + } +} + +// CopyWeighted copies nodes and edges as undirected edges from the source to the destination +// without first clearing the destination. Copy will panic if a node ID in the source +// graph matches a node ID in the destination. +// +// If the source is undirected and the destination is directed both directions will +// be present in the destination after the copy is complete. +// +// If the source is a directed graph, the destination is undirected, and a fundamental +// cycle exists with two nodes where the edge weights differ, the resulting destination +// graph's edge weight between those nodes is undefined. If there is a defined function +// to resolve such conflicts, an UndirectWeighted may be used to do this. 
+func CopyWeighted(dst WeightedBuilder, src Weighted) { + nodes := src.Nodes() + for nodes.Next() { + dst.AddNode(nodes.Node()) + } + nodes.Reset() + for nodes.Next() { + u := nodes.Node() + uid := u.ID() + to := src.From(uid) + for to.Next() { + v := to.Node() + dst.SetWeightedEdge(src.WeightedEdge(uid, v.ID())) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go new file mode 100644 index 00000000..88d1cb80 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linear provides common linear data structures. +package linear // import "gonum.org/v1/gonum/graph/internal/linear" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go b/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go new file mode 100644 index 00000000..62e19db6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go @@ -0,0 +1,73 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package linear + +import ( + "gonum.org/v1/gonum/graph" +) + +// NodeStack implements a LIFO stack of graph.Node. +type NodeStack []graph.Node + +// Len returns the number of graph.Nodes on the stack. +func (s *NodeStack) Len() int { return len(*s) } + +// Pop returns the last graph.Node on the stack and removes it +// from the stack. +func (s *NodeStack) Pop() graph.Node { + v := *s + v, n := v[:len(v)-1], v[len(v)-1] + *s = v + return n +} + +// Push adds the node n to the stack at the last position. +func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } + +// NodeQueue implements a FIFO queue. +type NodeQueue struct { + head int + data []graph.Node +} + +// Len returns the number of graph.Nodes in the queue. +func (q *NodeQueue) Len() int { return len(q.data) - q.head } + +// Enqueue adds the node n to the back of the queue. +func (q *NodeQueue) Enqueue(n graph.Node) { + if len(q.data) == cap(q.data) && q.head > 0 { + l := q.Len() + copy(q.data, q.data[q.head:]) + q.head = 0 + q.data = append(q.data[:l], n) + } else { + q.data = append(q.data, n) + } +} + +// Dequeue returns the graph.Node at the front of the queue and +// removes it from the queue. +func (q *NodeQueue) Dequeue() graph.Node { + if q.Len() == 0 { + panic("queue: empty queue") + } + + var n graph.Node + n, q.data[q.head] = q.data[q.head], nil + q.head++ + + if q.Len() == 0 { + q.head = 0 + q.data = q.data[:0] + } + + return n +} + +// Reset clears the queue for reuse. +func (q *NodeQueue) Reset() { + q.head = 0 + q.data = q.data[:0] +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go new file mode 100644 index 00000000..563df6f2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ordered provides common sort ordering types. 
+package ordered // import "gonum.org/v1/gonum/graph/internal/ordered" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go new file mode 100644 index 00000000..a7250d1f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go @@ -0,0 +1,93 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ordered + +import "gonum.org/v1/gonum/graph" + +// ByID implements the sort.Interface sorting a slice of graph.Node +// by ID. +type ByID []graph.Node + +func (n ByID) Len() int { return len(n) } +func (n ByID) Less(i, j int) bool { return n[i].ID() < n[j].ID() } +func (n ByID) Swap(i, j int) { n[i], n[j] = n[j], n[i] } + +// BySliceValues implements the sort.Interface sorting a slice of +// []int64 lexically by the values of the []int64. +type BySliceValues [][]int64 + +func (c BySliceValues) Len() int { return len(c) } +func (c BySliceValues) Less(i, j int) bool { + a, b := c[i], c[j] + l := len(a) + if len(b) < l { + l = len(b) + } + for k, v := range a[:l] { + if v < b[k] { + return true + } + if v > b[k] { + return false + } + } + return len(a) < len(b) +} +func (c BySliceValues) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// BySliceIDs implements the sort.Interface sorting a slice of +// []graph.Node lexically by the IDs of the []graph.Node. +type BySliceIDs [][]graph.Node + +func (c BySliceIDs) Len() int { return len(c) } +func (c BySliceIDs) Less(i, j int) bool { + a, b := c[i], c[j] + l := len(a) + if len(b) < l { + l = len(b) + } + for k, v := range a[:l] { + if v.ID() < b[k].ID() { + return true + } + if v.ID() > b[k].ID() { + return false + } + } + return len(a) < len(b) +} +func (c BySliceIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// Int64s implements the sort.Interface sorting a slice of +// int64. +type Int64s []int64 + +func (s Int64s) Len() int { return len(s) } +func (s Int64s) Less(i, j int) bool { return s[i] < s[j] } +func (s Int64s) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Reverse reverses the order of nodes. +func Reverse(nodes []graph.Node) { + for i, j := 0, len(nodes)-1; i < j; i, j = i+1, j-1 { + nodes[i], nodes[j] = nodes[j], nodes[i] + } +} + +// LinesByIDs implements the sort.Interface sorting a slice of graph.LinesByIDs +// lexically by the From IDs, then by the To IDs, finally by the Line IDs. +type LinesByIDs []graph.Line + +func (n LinesByIDs) Len() int { return len(n) } +func (n LinesByIDs) Less(i, j int) bool { + a, b := n[i], n[j] + if a.From().ID() != b.From().ID() { + return a.From().ID() < b.From().ID() + } + if a.To().ID() != b.To().ID() { + return a.To().ID() < b.To().ID() + } + return n[i].ID() < n[j].ID() +} +func (n LinesByIDs) Swap(i, j int) { n[i], n[j] = n[j], n[i] } diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go new file mode 100644 index 00000000..86f2afc4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package set provides integer and graph.Node sets. 
+package set // import "gonum.org/v1/gonum/graph/internal/set" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go new file mode 100644 index 00000000..f95a4e12 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go @@ -0,0 +1,36 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!safe + +package set + +import "unsafe" + +// same determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func same(a, b Nodes) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} + +// intsSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func intsSame(a, b Ints) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} + +// int64sSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func int64sSame(a, b Int64s) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go new file mode 100644 index 00000000..4ff4f4ed --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go @@ -0,0 +1,36 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine safe + +package set + +import "reflect" + +// same determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func same(a, b Nodes) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} + +// intsSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func intsSame(a, b Ints) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} + +// int64sSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. 
A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func int64sSame(a, b Int64s) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go new file mode 100644 index 00000000..0506b8e9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go @@ -0,0 +1,228 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package set + +import "gonum.org/v1/gonum/graph" + +// Ints is a set of int identifiers. +type Ints map[int]struct{} + +// The simple accessor methods for Ints are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Ints) Add(e int) { + s[e] = struct{}{} +} + +// Has reports the existence of the element in the set. +func (s Ints) Has(e int) bool { + _, ok := s[e] + return ok +} + +// Remove deletes the specified element from the set. +func (s Ints) Remove(e int) { + delete(s, e) +} + +// Count reports the number of elements stored in the set. +func (s Ints) Count() int { + return len(s) +} + +// IntsEqual reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func IntsEqual(a, b Ints) bool { + if intsSame(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// Int64s is a set of int64 identifiers. +type Int64s map[int64]struct{} + +// The simple accessor methods for Ints are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Int64s) Add(e int64) { + s[e] = struct{}{} +} + +// Has reports the existence of the element in the set. +func (s Int64s) Has(e int64) bool { + _, ok := s[e] + return ok +} + +// Remove deletes the specified element from the set. +func (s Int64s) Remove(e int64) { + delete(s, e) +} + +// Count reports the number of elements stored in the set. +func (s Int64s) Count() int { + return len(s) +} + +// Int64sEqual reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func Int64sEqual(a, b Int64s) bool { + if int64sSame(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// Nodes is a set of nodes keyed in their integer identifiers. +type Nodes map[int64]graph.Node + +// NewNodes returns a new Nodes. +func NewNodes() Nodes { + return make(Nodes) +} + +// NewNodes returns a new Nodes with the given size hint, n. +func NewNodesSize(n int) Nodes { + return make(Nodes, n) +} + +// The simple accessor methods for Nodes are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Nodes) Add(n graph.Node) { + s[n.ID()] = n +} + +// Remove deletes the specified element from the set. +func (s Nodes) Remove(e graph.Node) { + delete(s, e.ID()) +} + +// Count returns the number of element in the set. +func (s Nodes) Count() int { + return len(s) +} + +// Has reports the existence of the elements in the set. 
+func (s Nodes) Has(n graph.Node) bool { + _, ok := s[n.ID()] + return ok +} + +// CloneNodes returns a clone of src. +func CloneNodes(src Nodes) Nodes { + dst := make(Nodes, len(src)) + for e, n := range src { + dst[e] = n + } + return dst +} + +// Equal reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func Equal(a, b Nodes) bool { + if same(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// UnionOfNodes returns the union of a and b. +// +// The union of two sets, a and b, is the set containing all the +// elements of each, for instance: +// +// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} +// +// Since sets may not have repetition, unions of two sets that overlap +// do not contain repeat elements, that is: +// +// {a,b,c} UNION {b,c,d} = {a,b,c,d} +// +func UnionOfNodes(a, b Nodes) Nodes { + if same(a, b) { + return CloneNodes(a) + } + + dst := make(Nodes) + for e, n := range a { + dst[e] = n + } + for e, n := range b { + dst[e] = n + } + + return dst +} + +// IntersectionOfNodes returns the intersection of a and b. +// +// The intersection of two sets, a and b, is the set containing all +// the elements shared between the two sets, for instance: +// +// {a,b,c} INTERSECT {b,c,d} = {b,c} +// +// The intersection between a set and itself is itself, and thus +// effectively a copy operation: +// +// {a,b,c} INTERSECT {a,b,c} = {a,b,c} +// +// The intersection between two sets that share no elements is the empty +// set: +// +// {a,b,c} INTERSECT {d,e,f} = {} +// +func IntersectionOfNodes(a, b Nodes) Nodes { + if same(a, b) { + return CloneNodes(a) + } + dst := make(Nodes) + if len(a) > len(b) { + a, b = b, a + } + for e, n := range a { + if _, ok := b[e]; ok { + dst[e] = n + } + } + return dst +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go b/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go new file mode 100644 index 00000000..5f503c13 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go @@ -0,0 +1,54 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uid implements unique ID provision for graphs. +package uid + +import "gonum.org/v1/gonum/graph/internal/set" + +// Max is the maximum value of int64. +const Max = int64(^uint64(0) >> 1) + +// Set implements available ID storage. +type Set struct { + maxID int64 + used, free set.Int64s +} + +// NewSet returns a new Set. The returned value should not be passed except by pointer. +func NewSet() Set { + return Set{maxID: -1, used: make(set.Int64s), free: make(set.Int64s)} +} + +// NewID returns a new unique ID. The ID returned is not considered used +// until passed in a call to use. +func (s *Set) NewID() int64 { + for id := range s.free { + return id + } + if s.maxID != Max { + return s.maxID + 1 + } + for id := int64(0); id <= s.maxID+1; id++ { + if !s.used.Has(id) { + return id + } + } + panic("unreachable") +} + +// Use adds the id to the used IDs in the Set. +func (s *Set) Use(id int64) { + s.used.Add(id) + s.free.Remove(id) + if id > s.maxID { + s.maxID = id + } +} + +// Release frees the id for reuse. 
+func (s *Set) Release(id int64) { + s.free.Add(id) + s.used.Remove(id) +} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/doc.go b/vendor/gonum.org/v1/gonum/graph/iterator/doc.go new file mode 100644 index 00000000..0983bc7c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/doc.go @@ -0,0 +1,9 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package iterator provides node, edge and line iterators. +// +// The iterators provided satisfy the graph.Nodes, graph.Edges and +// graph.Lines interfaces. +package iterator diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/edges.go b/vendor/gonum.org/v1/gonum/graph/iterator/edges.go new file mode 100644 index 00000000..21ef0433 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/edges.go @@ -0,0 +1,131 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iterator + +import "gonum.org/v1/gonum/graph" + +// OrderedEdges implements the graph.Edges and graph.EdgeSlicer interfaces. +// The iteration order of OrderedEdges is the order of edges passed to +// NewEdgeIterator. +type OrderedEdges struct { + idx int + edges []graph.Edge +} + +// NewOrderedEdges returns an OrderedEdges initialized with the provided edges. +func NewOrderedEdges(edges []graph.Edge) *OrderedEdges { + return &OrderedEdges{idx: -1, edges: edges} +} + +// Len returns the remaining number of edges to be iterated over. +func (e *OrderedEdges) Len() int { + if e.idx >= len(e.edges) { + return 0 + } + if e.idx <= 0 { + return len(e.edges) + } + return len(e.edges[e.idx:]) +} + +// Next returns whether the next call of Edge will return a valid edge. +func (e *OrderedEdges) Next() bool { + if uint(e.idx)+1 < uint(len(e.edges)) { + e.idx++ + return true + } + e.idx = len(e.edges) + return false +} + +// Edge returns the current edge of the iterator. Next must have been +// called prior to a call to Edge. +func (e *OrderedEdges) Edge() graph.Edge { + if e.idx >= len(e.edges) || e.idx < 0 { + return nil + } + return e.edges[e.idx] +} + +// EdgeSlice returns all the remaining edges in the iterator and advances +// the iterator. +func (e *OrderedEdges) EdgeSlice() []graph.Edge { + if e.idx >= len(e.edges) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.edges) + return e.edges[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedEdges) Reset() { + e.idx = -1 +} + +// OrderedWeightedEdges implements the graph.Edges and graph.EdgeSlicer interfaces. +// The iteration order of OrderedWeightedEdges is the order of edges passed to +// NewEdgeIterator. +type OrderedWeightedEdges struct { + idx int + edges []graph.WeightedEdge +} + +// NewOrderedWeightedEdges returns an OrderedWeightedEdges initialized with the provided edges. +func NewOrderedWeightedEdges(edges []graph.WeightedEdge) *OrderedWeightedEdges { + return &OrderedWeightedEdges{idx: -1, edges: edges} +} + +// Len returns the remaining number of edges to be iterated over. +func (e *OrderedWeightedEdges) Len() int { + if e.idx >= len(e.edges) { + return 0 + } + if e.idx <= 0 { + return len(e.edges) + } + return len(e.edges[e.idx:]) +} + +// Next returns whether the next call of WeightedEdge will return a valid edge. 
+func (e *OrderedWeightedEdges) Next() bool { + if uint(e.idx)+1 < uint(len(e.edges)) { + e.idx++ + return true + } + e.idx = len(e.edges) + return false +} + +// WeightedEdge returns the current edge of the iterator. Next must have been +// called prior to a call to WeightedEdge. +func (e *OrderedWeightedEdges) WeightedEdge() graph.WeightedEdge { + if e.idx >= len(e.edges) || e.idx < 0 { + return nil + } + return e.edges[e.idx] +} + +// WeightedEdgeSlice returns all the remaining edges in the iterator and advances +// the iterator. +func (e *OrderedWeightedEdges) WeightedEdgeSlice() []graph.WeightedEdge { + if e.idx >= len(e.edges) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.edges) + return e.edges[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedWeightedEdges) Reset() { + e.idx = -1 +} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/lines.go b/vendor/gonum.org/v1/gonum/graph/iterator/lines.go new file mode 100644 index 00000000..ed655df0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/lines.go @@ -0,0 +1,131 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iterator + +import "gonum.org/v1/gonum/graph" + +// OrderedLines implements the graph.Lines and graph.LineSlicer interfaces. +// The iteration order of OrderedLines is the order of lines passed to +// NewLineIterator. +type OrderedLines struct { + idx int + lines []graph.Line +} + +// NewOrderedLines returns an OrderedLines initialized with the provided lines. +func NewOrderedLines(lines []graph.Line) *OrderedLines { + return &OrderedLines{idx: -1, lines: lines} +} + +// Len returns the remaining number of lines to be iterated over. +func (e *OrderedLines) Len() int { + if e.idx >= len(e.lines) { + return 0 + } + if e.idx <= 0 { + return len(e.lines) + } + return len(e.lines[e.idx:]) +} + +// Next returns whether the next call of Line will return a valid line. +func (e *OrderedLines) Next() bool { + if uint(e.idx)+1 < uint(len(e.lines)) { + e.idx++ + return true + } + e.idx = len(e.lines) + return false +} + +// Line returns the current line of the iterator. Next must have been +// called prior to a call to Line. +func (e *OrderedLines) Line() graph.Line { + if e.idx >= len(e.lines) || e.idx < 0 { + return nil + } + return e.lines[e.idx] +} + +// LineSlice returns all the remaining lines in the iterator and advances +// the iterator. +func (e *OrderedLines) LineSlice() []graph.Line { + if e.idx >= len(e.lines) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.lines) + return e.lines[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedLines) Reset() { + e.idx = -1 +} + +// OrderedWeightedLines implements the graph.Lines and graph.LineSlicer interfaces. +// The iteration order of OrderedWeightedLines is the order of lines passed to +// NewLineIterator. +type OrderedWeightedLines struct { + idx int + lines []graph.WeightedLine +} + +// NewWeightedLineIterator returns an OrderedWeightedLines initialized with the provided lines. +func NewOrderedWeightedLines(lines []graph.WeightedLine) *OrderedWeightedLines { + return &OrderedWeightedLines{idx: -1, lines: lines} +} + +// Len returns the remaining number of lines to be iterated over. 
+func (e *OrderedWeightedLines) Len() int { + if e.idx >= len(e.lines) { + return 0 + } + if e.idx <= 0 { + return len(e.lines) + } + return len(e.lines[e.idx:]) +} + +// Next returns whether the next call of WeightedLine will return a valid line. +func (e *OrderedWeightedLines) Next() bool { + if uint(e.idx)+1 < uint(len(e.lines)) { + e.idx++ + return true + } + e.idx = len(e.lines) + return false +} + +// WeightedLine returns the current line of the iterator. Next must have been +// called prior to a call to WeightedLine. +func (e *OrderedWeightedLines) WeightedLine() graph.WeightedLine { + if e.idx >= len(e.lines) || e.idx < 0 { + return nil + } + return e.lines[e.idx] +} + +// WeightedLineSlice returns all the remaining lines in the iterator and advances +// the iterator. +func (e *OrderedWeightedLines) WeightedLineSlice() []graph.WeightedLine { + if e.idx >= len(e.lines) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.lines) + return e.lines[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedWeightedLines) Reset() { + e.idx = -1 +} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go b/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go new file mode 100644 index 00000000..952dd770 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go @@ -0,0 +1,125 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iterator + +import "gonum.org/v1/gonum/graph" + +// OrderedNodes implements the graph.Nodes and graph.NodeSlicer interfaces. +// The iteration order of OrderedNodes is the order of nodes passed to +// NewNodeIterator. +type OrderedNodes struct { + idx int + nodes []graph.Node +} + +// NewOrderedNodes returns a OrderedNodes initialized with the provided nodes. +func NewOrderedNodes(nodes []graph.Node) *OrderedNodes { + return &OrderedNodes{idx: -1, nodes: nodes} +} + +// Len returns the remaining number of nodes to be iterated over. +func (n *OrderedNodes) Len() int { + if n.idx >= len(n.nodes) { + return 0 + } + if n.idx <= 0 { + return len(n.nodes) + } + return len(n.nodes[n.idx:]) +} + +// Next returns whether the next call of Node will return a valid node. +func (n *OrderedNodes) Next() bool { + if uint(n.idx)+1 < uint(len(n.nodes)) { + n.idx++ + return true + } + n.idx = len(n.nodes) + return false +} + +// Node returns the current node of the iterator. Next must have been +// called prior to a call to Node. +func (n *OrderedNodes) Node() graph.Node { + if n.idx >= len(n.nodes) || n.idx < 0 { + return nil + } + return n.nodes[n.idx] +} + +// NodeSlice returns all the remaining nodes in the iterator and advances +// the iterator. +func (n *OrderedNodes) NodeSlice() []graph.Node { + if n.idx >= len(n.nodes) { + return nil + } + idx := n.idx + if idx == -1 { + idx = 0 + } + n.idx = len(n.nodes) + return n.nodes[idx:] +} + +// Reset returns the iterator to its initial state. +func (n *OrderedNodes) Reset() { + n.idx = -1 +} + +// ImplicitNodes implements the graph.Nodes interface for a set of nodes over +// a contiguous ID range. +type ImplicitNodes struct { + beg, end int + curr int + newNode func(id int) graph.Node +} + +// NewImplicitNodes returns a new implicit node iterator spanning nodes in [beg,end). +// The provided new func maps the id to a graph.Node. NewImplicitNodes will panic +// if beg is greater than end. 
+func NewImplicitNodes(beg, end int, new func(id int) graph.Node) *ImplicitNodes { + if beg > end { + panic("iterator: invalid range") + } + return &ImplicitNodes{beg: beg, end: end, curr: beg - 1, newNode: new} +} + +// Len returns the remaining number of nodes to be iterated over. +func (n *ImplicitNodes) Len() int { + return n.end - n.curr - 1 +} + +// Next returns whether the next call of Node will return a valid node. +func (n *ImplicitNodes) Next() bool { + if n.curr == n.end { + return false + } + n.curr++ + return n.curr < n.end +} + +// Node returns the current node of the iterator. Next must have been +// called prior to a call to Node. +func (n *ImplicitNodes) Node() graph.Node { + if n.Len() == -1 || n.curr < n.beg { + return nil + } + return n.newNode(n.curr) +} + +// Reset returns the iterator to its initial state. +func (n *ImplicitNodes) Reset() { + n.curr = n.beg - 1 +} + +// NodeSlice returns all the remaining nodes in the iterator and advances +// the iterator. +func (n *ImplicitNodes) NodeSlice() []graph.Node { + nodes := make([]graph.Node, 0, n.Len()) + for n.curr++; n.curr < n.end; n.curr++ { + nodes = append(nodes, n.newNode(n.curr)) + } + return nodes +} diff --git a/vendor/gonum.org/v1/gonum/graph/multigraph.go b/vendor/gonum.org/v1/gonum/graph/multigraph.go new file mode 100644 index 00000000..038a3d51 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/multigraph.go @@ -0,0 +1,198 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Line is an edge in a multigraph. A Line returns an ID that must +// distinguish Lines sharing Node end points. +type Line interface { + // From returns the from node of the edge. + From() Node + + // To returns the to node of the edge. + To() Node + + // ReversedLine returns a line that has the + // end points of the receiver swapped. + ReversedLine() Line + + // ID returns the unique ID for the Line. + ID() int64 +} + +// WeightedLine is a weighted multigraph edge. +type WeightedLine interface { + Line + Weight() float64 +} + +// Multigraph is a generalized multigraph. +type Multigraph interface { + // Node returns the node with the given ID if it exists + // in the multigraph, and nil otherwise. + Node(id int64) Node + + // Nodes returns all the nodes in the multigraph. + // + // Nodes must not return nil. + Nodes() Nodes + + // From returns all nodes that can be reached directly + // from the node with the given ID. + // + // From must not return nil. + From(id int64) Nodes + + // HasEdgeBetween returns whether an edge exists between + // nodes with IDs xid and yid without considering direction. + HasEdgeBetween(xid, yid int64) bool + + // Lines returns the lines from u to v, with IDs uid and + // vid, if any such lines exist and nil otherwise. The + // node v must be directly reachable from u as defined by + // the From method. + // + // Lines must not return nil. + Lines(uid, vid int64) Lines +} + +// WeightedMultigraph is a weighted multigraph. +type WeightedMultigraph interface { + Multigraph + + // WeightedLines returns the weighted lines from u to v + // with IDs uid and vid if any such lines exist and nil + // otherwise. The node v must be directly reachable + // from u as defined by the From method. + // + // WeightedLines must not return nil. + WeightedLines(uid, vid int64) WeightedLines +} + +// UndirectedMultigraph is an undirected multigraph. 
+type UndirectedMultigraph interface { + Multigraph + + // LinesBetween returns the lines between nodes x and y + // with IDs xid and yid. + // + // LinesBetween must not return nil. + LinesBetween(xid, yid int64) Lines +} + +// WeightedUndirectedMultigraph is a weighted undirected multigraph. +type WeightedUndirectedMultigraph interface { + WeightedMultigraph + + // WeightedLinesBetween returns the lines between nodes + // x and y with IDs xid and yid. + // + // WeightedLinesBetween must not return nil. + WeightedLinesBetween(xid, yid int64) WeightedLines +} + +// DirectedMultigraph is a directed multigraph. +type DirectedMultigraph interface { + Multigraph + + // HasEdgeFromTo returns whether an edge exists + // in the multigraph from u to v with IDs uid + // and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// WeightedDirectedMultigraph is a weighted directed multigraph. +type WeightedDirectedMultigraph interface { + WeightedMultigraph + + // HasEdgeFromTo returns whether an edge exists + // in the multigraph from u to v with IDs uid + // and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// LineAdder is an interface for adding lines to a multigraph. +type LineAdder interface { + // NewLine returns a new Line from the source to the destination node. + NewLine(from, to Node) Line + + // SetLine adds a Line from one node to another. + // If the multigraph supports node addition the nodes + // will be added if they do not exist, otherwise + // SetLine will panic. + // Whether l, l.From() and l.To() are stored + // within the graph is implementation dependent. + SetLine(l Line) +} + +// WeightedLineAdder is an interface for adding lines to a multigraph. +type WeightedLineAdder interface { + // NewWeightedLine returns a new WeightedLine from + // the source to the destination node. + NewWeightedLine(from, to Node, weight float64) WeightedLine + + // SetWeightedLine adds a weighted line from one node + // to another. If the multigraph supports node addition + // the nodes will be added if they do not exist, + // otherwise SetWeightedLine will panic. + // Whether l, l.From() and l.To() are stored + // within the graph is implementation dependent. + SetWeightedLine(l WeightedLine) +} + +// LineRemover is an interface for removing lines from a multigraph. +type LineRemover interface { + // RemoveLine removes the line with the given end + // and line IDs, leaving the terminal nodes. If + // the line does not exist it is a no-op. + RemoveLine(fid, tid, id int64) +} + +// MultigraphBuilder is a multigraph that can have nodes and lines added. +type MultigraphBuilder interface { + NodeAdder + LineAdder +} + +// WeightedMultigraphBuilder is a multigraph that can have nodes and weighted lines added. +type WeightedMultigraphBuilder interface { + NodeAdder + WeightedLineAdder +} + +// UndirectedMultgraphBuilder is an undirected multigraph builder. +type UndirectedMultigraphBuilder interface { + UndirectedMultigraph + MultigraphBuilder +} + +// UndirectedWeightedMultigraphBuilder is an undirected weighted multigraph builder. +type UndirectedWeightedMultigraphBuilder interface { + UndirectedMultigraph + WeightedMultigraphBuilder +} + +// DirectedMultigraphBuilder is a directed multigraph builder. 
+type DirectedMultigraphBuilder interface { + DirectedMultigraph + MultigraphBuilder +} + +// DirectedWeightedMultigraphBuilder is a directed weighted multigraph builder. +type DirectedWeightedMultigraphBuilder interface { + DirectedMultigraph + WeightedMultigraphBuilder +} diff --git a/vendor/gonum.org/v1/gonum/graph/nodes_edges.go b/vendor/gonum.org/v1/gonum/graph/nodes_edges.go new file mode 100644 index 00000000..3d5dae1f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/nodes_edges.go @@ -0,0 +1,300 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Iterator is an item iterator. +type Iterator interface { + // Next advances the iterator and returns whether + // the next call to the item method will return a + // non-nil item. + // + // Next should be called prior to any call to the + // iterator's item retrieval method after the + // iterator has been obtained or reset. + // + // The order of iteration is implementation + // dependent. + Next() bool + + // Len returns the number of items remaining in the + // iterator. + // + // If the number of items in the iterator is unknown, + // too large to materialize or too costly to calculate + // then Len may return a negative value. + // In this case the consuming function must be able + // to operate on the items of the iterator directly + // without materializing the items into a slice. + // The magnitude of a negative length has + // implementation-dependent semantics. + Len() int + + // Reset returns the iterator to its start position. + Reset() +} + +// Nodes is a Node iterator. +type Nodes interface { + Iterator + + // Node returns the current Node from the iterator. + Node() Node +} + +// NodeSlicer wraps the NodeSlice method. +type NodeSlicer interface { + // NodeSlice returns the set of nodes remaining + // to be iterated by a Nodes iterator. + // The holder of the iterator may arbitrarily + // change elements in the returned slice, but + // those changes may be reflected to other + // iterators. + NodeSlice() []Node +} + +// NodesOf returns it.Len() nodes from it. If it is a NodeSlicer, the NodeSlice method +// is used to obtain the nodes. It is safe to pass a nil Nodes to NodesOf. +// +// If the Nodes has an indeterminate length, NodesOf will panic. +func NodesOf(it Nodes) []Node { + if it == nil { + return nil + } + len := it.Len() + switch { + case len == 0: + return nil + case len < 0: + panic("graph: called NodesOf on indeterminate iterator") + } + switch it := it.(type) { + case NodeSlicer: + return it.NodeSlice() + } + n := make([]Node, 0, len) + for it.Next() { + n = append(n, it.Node()) + } + return n +} + +// Edges is an Edge iterator. +type Edges interface { + Iterator + + // Edge returns the current Edge from the iterator. + Edge() Edge +} + +// EdgeSlicer wraps the EdgeSlice method. +type EdgeSlicer interface { + // EdgeSlice returns the set of edges remaining + // to be iterated by an Edges iterator. + // The holder of the iterator may arbitrarily + // change elements in the returned slice, but + // those changes may be reflected to other + // iterators. + EdgeSlice() []Edge +} + +// EdgesOf returns it.Len() nodes from it. If it is an EdgeSlicer, the EdgeSlice method is used +// to obtain the edges. It is safe to pass a nil Edges to EdgesOf. +// +// If the Edges has an indeterminate length, EdgesOf will panic. 
+func EdgesOf(it Edges) []Edge { + if it == nil { + return nil + } + len := it.Len() + switch { + case len == 0: + return nil + case len < 0: + panic("graph: called EdgesOf on indeterminate iterator") + } + switch it := it.(type) { + case EdgeSlicer: + return it.EdgeSlice() + } + e := make([]Edge, 0, len) + for it.Next() { + e = append(e, it.Edge()) + } + return e +} + +// WeightedEdges is a WeightedEdge iterator. +type WeightedEdges interface { + Iterator + + // Edge returns the current Edge from the iterator. + WeightedEdge() WeightedEdge +} + +// WeightedEdgeSlicer wraps the WeightedEdgeSlice method. +type WeightedEdgeSlicer interface { + // EdgeSlice returns the set of edges remaining + // to be iterated by an Edges iterator. + // The holder of the iterator may arbitrarily + // change elements in the returned slice, but + // those changes may be reflected to other + // iterators. + WeightedEdgeSlice() []WeightedEdge +} + +// WeightedEdgesOf returns it.Len() weighted edge from it. If it is a WeightedEdgeSlicer, the +// WeightedEdgeSlice method is used to obtain the edges. It is safe to pass a nil WeightedEdges +// to WeightedEdgesOf. +// +// If the WeightedEdges has an indeterminate length, WeightedEdgesOf will panic. +func WeightedEdgesOf(it WeightedEdges) []WeightedEdge { + if it == nil { + return nil + } + len := it.Len() + switch { + case len == 0: + return nil + case len < 0: + panic("graph: called WeightedEdgesOf on indeterminate iterator") + } + switch it := it.(type) { + case WeightedEdgeSlicer: + return it.WeightedEdgeSlice() + } + e := make([]WeightedEdge, 0, len) + for it.Next() { + e = append(e, it.WeightedEdge()) + } + return e +} + +// Lines is a Line iterator. +type Lines interface { + Iterator + + // Line returns the current Line from the iterator. + Line() Line +} + +// LineSlicer wraps the LineSlice method. +type LineSlicer interface { + // LineSlice returns the set of lines remaining + // to be iterated by an Lines iterator. + // The holder of the iterator may arbitrarily + // change elements in the returned slice, but + // those changes may be reflected to other + // iterators. + LineSlice() []Line +} + +// LinesOf returns it.Len() nodes from it. If it is a LineSlicer, the LineSlice method is used +// to obtain the lines. It is safe to pass a nil Lines to LinesOf. +// +// If the Lines has an indeterminate length, LinesOf will panic. +func LinesOf(it Lines) []Line { + if it == nil { + return nil + } + len := it.Len() + switch { + case len == 0: + return nil + case len < 0: + panic("graph: called LinesOf on indeterminate iterator") + } + switch it := it.(type) { + case LineSlicer: + return it.LineSlice() + } + l := make([]Line, 0, len) + for it.Next() { + l = append(l, it.Line()) + } + return l +} + +// WeightedLines is a WeightedLine iterator. +type WeightedLines interface { + Iterator + + // Line returns the current Line from the iterator. + WeightedLine() WeightedLine +} + +// WeightedLineSlicer wraps the WeightedLineSlice method. +type WeightedLineSlicer interface { + // LineSlice returns the set of lines remaining + // to be iterated by an Lines iterator. + // The holder of the iterator may arbitrarily + // change elements in the returned slice, but + // those changes may be reflected to other + // iterators. + WeightedLineSlice() []WeightedLine +} + +// WeightedLinesOf returns it.Len() weighted line from it. If it is a WeightedLineSlicer, the +// WeightedLineSlice method is used to obtain the lines. It is safe to pass a nil WeightedLines +// to WeightedLinesOf. 
+// +// If the WeightedLines has an indeterminate length, WeightedLinesOf will panic. +func WeightedLinesOf(it WeightedLines) []WeightedLine { + if it == nil { + return nil + } + len := it.Len() + switch { + case len == 0: + return nil + case len < 0: + panic("graph: called WeightedLinesOf on indeterminate iterator") + } + switch it := it.(type) { + case WeightedLineSlicer: + return it.WeightedLineSlice() + } + l := make([]WeightedLine, 0, len) + for it.Next() { + l = append(l, it.WeightedLine()) + } + return l +} + +// Empty is an empty set of nodes, edges or lines. It should be used when +// a graph returns a zero-length Iterator. Empty implements the slicer +// interfaces for nodes, edges and lines, returning nil for each of these. +const Empty = nothing + +var ( + _ Iterator = Empty + _ Nodes = Empty + _ NodeSlicer = Empty + _ Edges = Empty + _ EdgeSlicer = Empty + _ WeightedEdges = Empty + _ WeightedEdgeSlicer = Empty + _ Lines = Empty + _ LineSlicer = Empty + _ WeightedLines = Empty + _ WeightedLineSlicer = Empty +) + +const nothing = empty(true) + +type empty bool + +func (empty) Next() bool { return false } +func (empty) Len() int { return 0 } +func (empty) Reset() {} +func (empty) Node() Node { return nil } +func (empty) NodeSlice() []Node { return nil } +func (empty) Edge() Edge { return nil } +func (empty) EdgeSlice() []Edge { return nil } +func (empty) WeightedEdge() WeightedEdge { return nil } +func (empty) WeightedEdgeSlice() []WeightedEdge { return nil } +func (empty) Line() Line { return nil } +func (empty) LineSlice() []Line { return nil } +func (empty) WeightedLine() WeightedLine { return nil } +func (empty) WeightedLineSlice() []WeightedLine { return nil } diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go new file mode 100644 index 00000000..3daca9ad --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go @@ -0,0 +1,301 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/mat" +) + +var ( + dm *DirectedMatrix + + _ graph.Graph = dm + _ graph.Directed = dm + _ edgeSetter = dm + _ weightedEdgeSetter = dm +) + +// DirectedMatrix represents a directed graph using an adjacency +// matrix such that all IDs are in a contiguous block from 0 to n-1. +// Edges are stored implicitly as an edge weight, so edges stored in +// the graph are not recoverable. +type DirectedMatrix struct { + mat *mat.Dense + nodes []graph.Node + + self float64 + absent float64 +} + +// NewDirectedMatrix creates a directed dense graph with n nodes. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewDirectedMatrix(n int, init, self, absent float64) *DirectedMatrix { + matrix := make([]float64, n*n) + if init != 0 { + for i := range matrix { + matrix[i] = init + } + } + for i := 0; i < len(matrix); i += n + 1 { + matrix[i] = self + } + return &DirectedMatrix{ + mat: mat.NewDense(n, n, matrix), + self: self, + absent: absent, + } +} + +// NewDirectedMatrixFrom creates a directed dense graph with the given nodes. 
+// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may +// be in any order. If IDs are not contiguous NewDirectedMatrixFrom will panic. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewDirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *DirectedMatrix { + sort.Sort(ordered.ByID(nodes)) + for i, n := range nodes { + if int64(i) != n.ID() { + panic("simple: non-contiguous node IDs") + } + } + g := NewDirectedMatrix(len(nodes), init, self, absent) + g.nodes = nodes + return g +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedMatrix) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdge(uid, vid) +} + +// Edges returns all the edges in the graph. +func (g *DirectedMatrix) Edges() graph.Edges { + var edges []graph.Edge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := 0; j < r; j++ { + if i == j { + continue + } + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *DirectedMatrix) From(id int64) graph.Nodes { + if !g.has(id) { + return graph.Empty + } + var nodes []graph.Node + _, c := g.mat.Dims() + for j := 0; j < c; j++ { + if int64(j) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), j), g.absent) { + nodes = append(nodes, g.Node(int64(j))) + } + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *DirectedMatrix) HasEdgeBetween(xid, yid int64) bool { + if !g.has(xid) { + return false + } + if !g.has(yid) { + return false + } + // xid and yid are not greater than maximum int by this point. + return xid != yid && (!isSame(g.mat.At(int(xid), int(yid)), g.absent) || !isSame(g.mat.At(int(yid), int(xid)), g.absent)) +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *DirectedMatrix) HasEdgeFromTo(uid, vid int64) bool { + if !g.has(uid) { + return false + } + if !g.has(vid) { + return false + } + // uid and vid are not greater than maximum int by this point. + return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) +} + +// Matrix returns the mat.Matrix representation of the graph. The orientation +// of the matrix is such that the matrix entry at G_{ij} is the weight of the edge +// from node i to node j. +func (g *DirectedMatrix) Matrix() mat.Matrix { + // Prevent alteration of dimensions of the returned matrix. + m := *g.mat + return &m +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *DirectedMatrix) Node(id int64) graph.Node { + if !g.has(id) { + return nil + } + if g.nodes == nil { + return Node(id) + } + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. 
+func (g *DirectedMatrix) Nodes() graph.Nodes { + if g.nodes != nil { + nodes := make([]graph.Node, len(g.nodes)) + copy(nodes, g.nodes) + return iterator.NewOrderedNodes(nodes) + } + r, _ := g.mat.Dims() + // Matrix graphs must have at least one node. + return iterator.NewImplicitNodes(0, r, newSimpleNode) +} + +// RemoveEdge removes the edge with the given end point nodes from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *DirectedMatrix) RemoveEdge(fid, tid int64) { + if !g.has(fid) { + return + } + if !g.has(tid) { + return + } + // fid and tid are not greater than maximum int by this point. + g.mat.Set(int(fid), int(tid), g.absent) +} + +// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge +// are not in g or the edge is a self loop, SetEdge panics. SetEdge will store the nodes of +// e in the graph if it was initialized with NewDirectedMatrixFrom. +func (g *DirectedMatrix) SetEdge(e graph.Edge) { + g.setWeightedEdge(e, 1) +} + +// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g +// or the edge is a self loop, SetWeightedEdge panics. SetWeightedEdge will store the nodes of +// e in the graph if it was initialized with NewDirectedMatrixFrom. +func (g *DirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { + g.setWeightedEdge(e, e.Weight()) +} + +func (g *DirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { + from := e.From() + fid := from.ID() + to := e.To() + tid := to.ID() + if fid == tid { + panic("simple: set illegal edge") + } + if int64(int(fid)) != fid { + panic("simple: unavailable from node ID for dense graph") + } + if int64(int(tid)) != tid { + panic("simple: unavailable to node ID for dense graph") + } + if g.nodes != nil { + g.nodes[fid] = from + g.nodes[tid] = to + } + // fid and tid are not greater than maximum int by this point. + g.mat.Set(int(fid), int(tid), weight) +} + +// To returns all nodes in g that can reach directly to n. +func (g *DirectedMatrix) To(id int64) graph.Nodes { + if !g.has(id) { + return graph.Empty + } + var nodes []graph.Node + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(i, int(id)), g.absent) { + nodes = append(nodes, g.Node(int64(i))) + } + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *DirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if g.HasEdgeFromTo(xid, yid) { + // xid and yid are not greater than maximum int by this point. + return g.mat.At(int(xid), int(yid)), true + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { + if g.HasEdgeFromTo(uid, vid) { + // xid and yid are not greater than maximum int by this point. 
+ return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} + } + return nil +} + +// WeightedEdges returns all the edges in the graph. +func (g *DirectedMatrix) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := 0; j < r; j++ { + if i == j { + continue + } + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} + +func (g *DirectedMatrix) has(id int64) bool { + r, _ := g.mat.Dims() + return 0 <= id && id < int64(r) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go new file mode 100644 index 00000000..f51debb4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go @@ -0,0 +1,268 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/mat" +) + +var ( + um *UndirectedMatrix + + _ graph.Graph = um + _ graph.Undirected = um + _ edgeSetter = um + _ weightedEdgeSetter = um +) + +// UndirectedMatrix represents an undirected graph using an adjacency +// matrix such that all IDs are in a contiguous block from 0 to n-1. +// Edges are stored implicitly as an edge weight, so edges stored in +// the graph are not recoverable. +type UndirectedMatrix struct { + mat *mat.SymDense + nodes []graph.Node + + self float64 + absent float64 +} + +// NewUndirectedMatrix creates an undirected dense graph with n nodes. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix { + matrix := make([]float64, n*n) + if init != 0 { + for i := range matrix { + matrix[i] = init + } + } + for i := 0; i < len(matrix); i += n + 1 { + matrix[i] = self + } + return &UndirectedMatrix{ + mat: mat.NewSymDense(n, matrix), + self: self, + absent: absent, + } +} + +// NewUndirectedMatrixFrom creates an undirected dense graph with the given nodes. +// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may +// be in any order. If IDs are not contiguous NewUndirectedMatrixFrom will panic. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewUndirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *UndirectedMatrix { + sort.Sort(ordered.ByID(nodes)) + for i, n := range nodes { + if int64(i) != n.ID() { + panic("simple: non-contiguous node IDs") + } + } + g := NewUndirectedMatrix(len(nodes), init, self, absent) + g.nodes = nodes + return g +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. 
+func (g *UndirectedMatrix) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. +func (g *UndirectedMatrix) EdgeBetween(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// Edges returns all the edges in the graph. +func (g *UndirectedMatrix) Edges() graph.Edges { + var edges []graph.Edge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := i + 1; j < r; j++ { + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *UndirectedMatrix) From(id int64) graph.Nodes { + if !g.has(id) { + return graph.Empty + } + var nodes []graph.Node + r := g.mat.Symmetric() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), i), g.absent) { + nodes = append(nodes, g.Node(int64(i))) + } + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *UndirectedMatrix) HasEdgeBetween(uid, vid int64) bool { + if !g.has(uid) { + return false + } + if !g.has(vid) { + return false + } + // uid and vid are not greater than maximum int by this point. + return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) +} + +// Matrix returns the mat.Matrix representation of the graph. +func (g *UndirectedMatrix) Matrix() mat.Matrix { + // Prevent alteration of dimensions of the returned matrix. + m := *g.mat + return &m +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *UndirectedMatrix) Node(id int64) graph.Node { + if !g.has(id) { + return nil + } + if g.nodes == nil { + return Node(id) + } + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *UndirectedMatrix) Nodes() graph.Nodes { + if g.nodes != nil { + nodes := make([]graph.Node, len(g.nodes)) + copy(nodes, g.nodes) + return iterator.NewOrderedNodes(nodes) + } + r := g.mat.Symmetric() + // Matrix graphs must have at least one node. + return iterator.NewImplicitNodes(0, r, newSimpleNode) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *UndirectedMatrix) RemoveEdge(fid, tid int64) { + if !g.has(fid) { + return + } + if !g.has(tid) { + return + } + // fid and tid are not greater than maximum int by this point. + g.mat.SetSym(int(fid), int(tid), g.absent) +} + +// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge are +// not in g or the edge is a self loop, SetEdge panics. SetEdge will store the nodes of +// e in the graph if it was initialized with NewUndirectedMatrixFrom. +func (g *UndirectedMatrix) SetEdge(e graph.Edge) { + g.setWeightedEdge(e, 1) +} + +// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g +// or the edge is a self loop, SetWeightedEdge panics. SetWeightedEdge will store the nodes of +// e in the graph if it was initialized with NewUndirectedMatrixFrom. 
+func (g *UndirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { + g.setWeightedEdge(e, e.Weight()) +} + +func (g *UndirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { + from := e.From() + fid := from.ID() + to := e.To() + tid := to.ID() + if fid == tid { + panic("simple: set illegal edge") + } + if int64(int(fid)) != fid { + panic("simple: unavailable from node ID for dense graph") + } + if int64(int(tid)) != tid { + panic("simple: unavailable to node ID for dense graph") + } + if g.nodes != nil { + g.nodes[fid] = from + g.nodes[tid] = to + } + // fid and tid are not greater than maximum int by this point. + g.mat.SetSym(int(fid), int(tid), weight) +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *UndirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if g.HasEdgeBetween(xid, yid) { + // xid and yid are not greater than maximum int by this point. + return g.mat.At(int(xid), int(yid)), true + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. +func (g *UndirectedMatrix) WeightedEdgeBetween(uid, vid int64) graph.WeightedEdge { + if g.HasEdgeBetween(uid, vid) { + // uid and vid are not greater than maximum int by this point. + return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} + } + return nil +} + +// WeightedEdges returns all the edges in the graph. +func (g *UndirectedMatrix) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := i + 1; j < r; j++ { + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} + +func (g *UndirectedMatrix) has(id int64) bool { + r := g.mat.Symmetric() + return 0 <= id && id < int64(r) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/directed.go b/vendor/gonum.org/v1/gonum/graph/simple/directed.go new file mode 100644 index 00000000..f19efbd0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/directed.go @@ -0,0 +1,235 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + dg *DirectedGraph + + _ graph.Graph = dg + _ graph.Directed = dg + _ graph.NodeAdder = dg + _ graph.NodeRemover = dg + _ graph.EdgeAdder = dg + _ graph.EdgeRemover = dg +) + +// DirectedGraph implements a generalized directed graph. 
+type DirectedGraph struct { + nodes map[int64]graph.Node + from map[int64]map[int64]graph.Edge + to map[int64]map[int64]graph.Edge + + nodeIDs uid.Set +} + +// NewDirectedGraph returns a DirectedGraph. +func NewDirectedGraph() *DirectedGraph { + return &DirectedGraph{ + nodes: make(map[int64]graph.Node), + from: make(map[int64]map[int64]graph.Edge), + to: make(map[int64]map[int64]graph.Edge), + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *DirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.from[n.ID()] = make(map[int64]graph.Edge) + g.to[n.ID()] = make(map[int64]graph.Edge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedGraph) Edge(uid, vid int64) graph.Edge { + edge, ok := g.from[uid][vid] + if !ok { + return nil + } + return edge +} + +// Edges returns all the edges in the graph. +func (g *DirectedGraph) Edges() graph.Edges { + var edges []graph.Edge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *DirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + from := make([]graph.Node, len(g.from[id])) + i := 0 + for vid := range g.from[id] { + from[i] = g.nodes[vid] + i++ + } + if len(from) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(from) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *DirectedGraph) HasEdgeBetween(xid, yid int64) bool { + if _, ok := g.from[xid][yid]; ok { + return true + } + _, ok := g.from[yid][xid] + return ok +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *DirectedGraph) HasEdgeFromTo(uid, vid int64) bool { + if _, ok := g.from[uid][vid]; !ok { + return false + } + return true +} + +// NewEdge returns a new Edge from the source to the destination node. +func (g *DirectedGraph) NewEdge(from, to graph.Node) graph.Edge { + return &Edge{F: from, T: to} +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *DirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *DirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *DirectedGraph) Nodes() graph.Nodes { + if len(g.nodes) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. 
+func (g *DirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.from[fid], tid) + delete(g.to[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *DirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.from[id] { + delete(g.to[from], id) + } + delete(g.from, id) + + for to := range g.to[id] { + delete(g.from[to], id) + } + delete(g.to, id) + + g.nodeIDs.Release(id) +} + +// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *DirectedGraph) SetEdge(e graph.Edge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.from[fid][tid] = e + g.to[tid][fid] = e +} + +// To returns all nodes in g that can reach directly to n. +func (g *DirectedGraph) To(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + to := make([]graph.Node, len(g.to[id])) + i := 0 + for uid := range g.to[id] { + to[i] = g.nodes[uid] + i++ + } + if len(to) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(to) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/doc.go b/vendor/gonum.org/v1/gonum/graph/simple/doc.go new file mode 100644 index 00000000..dc3f24c5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/doc.go @@ -0,0 +1,9 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package simple provides a suite of simple graph implementations satisfying +// the gonum/graph interfaces. +// +// All types in simple return the graph.Empty value for empty iterators. +package simple // import "gonum.org/v1/gonum/graph/simple" diff --git a/vendor/gonum.org/v1/gonum/graph/simple/simple.go b/vendor/gonum.org/v1/gonum/graph/simple/simple.go new file mode 100644 index 00000000..3b457658 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/simple.go @@ -0,0 +1,72 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "math" + + "gonum.org/v1/gonum/graph" +) + +// Node is a simple graph node. +type Node int64 + +// ID returns the ID number of the node. +func (n Node) ID() int64 { + return int64(n) +} + +func newSimpleNode(id int) graph.Node { + return Node(id) +} + +// Edge is a simple graph edge. +type Edge struct { + F, T graph.Node +} + +// From returns the from-node of the edge. +func (e Edge) From() graph.Node { return e.F } + +// To returns the to-node of the edge. +func (e Edge) To() graph.Node { return e.T } + +// ReversedLine returns a new Edge with the F and T fields +// swapped. +func (e Edge) ReversedEdge() graph.Edge { return Edge{F: e.T, T: e.F} } + +// WeightedEdge is a simple weighted graph edge. 
+type WeightedEdge struct { + F, T graph.Node + W float64 +} + +// From returns the from-node of the edge. +func (e WeightedEdge) From() graph.Node { return e.F } + +// To returns the to-node of the edge. +func (e WeightedEdge) To() graph.Node { return e.T } + +// ReversedLine returns a new Edge with the F and T fields +// swapped. The weight of the new Edge is the same as +// the weight of the receiver. +func (e WeightedEdge) ReversedEdge() graph.Edge { return WeightedEdge{F: e.T, T: e.F, W: e.W} } + +// Weight returns the weight of the edge. +func (e WeightedEdge) Weight() float64 { return e.W } + +// isSame returns whether two float64 values are the same where NaN values +// are equalable. +func isSame(a, b float64) bool { + return a == b || (math.IsNaN(a) && math.IsNaN(b)) +} + +type edgeSetter interface { + SetEdge(e graph.Edge) +} + +type weightedEdgeSetter interface { + SetWeightedEdge(e graph.WeightedEdge) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/undirected.go b/vendor/gonum.org/v1/gonum/graph/simple/undirected.go new file mode 100644 index 00000000..841a8e38 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/undirected.go @@ -0,0 +1,216 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + ug *UndirectedGraph + + _ graph.Graph = ug + _ graph.Undirected = ug + _ graph.NodeAdder = ug + _ graph.NodeRemover = ug + _ graph.EdgeAdder = ug + _ graph.EdgeRemover = ug +) + +// UndirectedGraph implements a generalized undirected graph. +type UndirectedGraph struct { + nodes map[int64]graph.Node + edges map[int64]map[int64]graph.Edge + + nodeIDs uid.Set +} + +// NewUndirectedGraph returns an UndirectedGraph. +func NewUndirectedGraph() *UndirectedGraph { + return &UndirectedGraph{ + nodes: make(map[int64]graph.Node), + edges: make(map[int64]map[int64]graph.Edge), + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *UndirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.edges[n.ID()] = make(map[int64]graph.Edge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.EdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. +func (g *UndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { + edge, ok := g.edges[xid][yid] + if !ok { + return nil + } + if edge.From().ID() == xid { + return edge + } + return edge.ReversedEdge() +} + +// Edges returns all the edges in the graph. 
+func (g *UndirectedGraph) Edges() graph.Edges { + if len(g.edges) == 0 { + return graph.Empty + } + var edges []graph.Edge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *UndirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.nodes[id]; !ok { + return graph.Empty + } + + nodes := make([]graph.Node, len(g.edges[id])) + i := 0 + for from := range g.edges[id] { + nodes[i] = g.nodes[from] + i++ + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *UndirectedGraph) HasEdgeBetween(xid, yid int64) bool { + _, ok := g.edges[xid][yid] + return ok +} + +// NewEdge returns a new Edge from the source to the destination node. +func (g *UndirectedGraph) NewEdge(from, to graph.Node) graph.Edge { + return &Edge{F: from, T: to} +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *UndirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *UndirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *UndirectedGraph) Nodes() graph.Nodes { + if len(g.nodes) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end IDs from the graph, leaving the terminal nodes. +// If the edge does not exist it is a no-op. +func (g *UndirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.edges[fid], tid) + delete(g.edges[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *UndirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.edges[id] { + delete(g.edges[from], id) + } + delete(g.edges, id) + + g.nodeIDs.Release(id) +} + +// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. 
+func (g *UndirectedGraph) SetEdge(e graph.Edge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.edges[fid][tid] = e + g.edges[tid][fid] = e +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go b/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go new file mode 100644 index 00000000..92bd2842 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go @@ -0,0 +1,279 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + wdg *WeightedDirectedGraph + + _ graph.Graph = wdg + _ graph.Weighted = wdg + _ graph.Directed = wdg + _ graph.WeightedDirected = wdg + _ graph.NodeAdder = wdg + _ graph.NodeRemover = wdg + _ graph.WeightedEdgeAdder = wdg + _ graph.EdgeRemover = wdg +) + +// WeightedDirectedGraph implements a generalized weighted directed graph. +type WeightedDirectedGraph struct { + nodes map[int64]graph.Node + from map[int64]map[int64]graph.WeightedEdge + to map[int64]map[int64]graph.WeightedEdge + + self, absent float64 + + nodeIDs uid.Set +} + +// NewWeightedDirectedGraph returns a WeightedDirectedGraph with the specified self and absent +// edge weight values. +func NewWeightedDirectedGraph(self, absent float64) *WeightedDirectedGraph { + return &WeightedDirectedGraph{ + nodes: make(map[int64]graph.Node), + from: make(map[int64]map[int64]graph.WeightedEdge), + to: make(map[int64]map[int64]graph.WeightedEdge), + + self: self, + absent: absent, + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *WeightedDirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.from[n.ID()] = make(map[int64]graph.WeightedEdge) + g.to[n.ID()] = make(map[int64]graph.WeightedEdge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedDirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdge(uid, vid) +} + +// Edges returns all the edges in the graph. +func (g *WeightedDirectedGraph) Edges() graph.Edges { + var edges []graph.Edge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *WeightedDirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + from := make([]graph.Node, len(g.from[id])) + i := 0 + for vid := range g.from[id] { + from[i] = g.nodes[vid] + i++ + } + if len(from) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(from) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. 
+func (g *WeightedDirectedGraph) HasEdgeBetween(xid, yid int64) bool { + if _, ok := g.from[xid][yid]; ok { + return true + } + _, ok := g.from[yid][xid] + return ok +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *WeightedDirectedGraph) HasEdgeFromTo(uid, vid int64) bool { + if _, ok := g.from[uid][vid]; !ok { + return false + } + return true +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *WeightedDirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// NewWeightedEdge returns a new weighted edge from the source to the destination node. +func (g *WeightedDirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { + return &WeightedEdge{F: from, T: to, W: weight} +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *WeightedDirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *WeightedDirectedGraph) Nodes() graph.Nodes { + if len(g.from) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *WeightedDirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.from[fid], tid) + delete(g.to[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *WeightedDirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.from[id] { + delete(g.to[from], id) + } + delete(g.from, id) + + for to := range g.to[id] { + delete(g.from[to], id) + } + delete(g.to, id) + + g.nodeIDs.Release(id) +} + +// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *WeightedDirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.from[fid][tid] = e + g.to[tid][fid] = e +} + +// To returns all nodes in g that can reach directly to n. +func (g *WeightedDirectedGraph) To(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + to := make([]graph.Node, len(g.to[id])) + i := 0 + for uid := range g.to[id] { + to[i] = g.nodes[uid] + i++ + } + if len(to) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(to) +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. 
+// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *WeightedDirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if to, ok := g.from[xid]; ok { + if e, ok := to[yid]; ok { + return e.Weight(), true + } + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedDirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { + edge, ok := g.from[uid][vid] + if !ok { + return nil + } + return edge +} + +// WeightedEdges returns all the weighted edges in the graph. +func (g *WeightedDirectedGraph) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go b/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go new file mode 100644 index 00000000..59325768 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go @@ -0,0 +1,273 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + wug *WeightedUndirectedGraph + + _ graph.Graph = wug + _ graph.Weighted = wug + _ graph.Undirected = wug + _ graph.WeightedUndirected = wug + _ graph.NodeAdder = wug + _ graph.NodeRemover = wug + _ graph.WeightedEdgeAdder = wug + _ graph.EdgeRemover = wug +) + +// WeightedUndirectedGraph implements a generalized weighted undirected graph. +type WeightedUndirectedGraph struct { + nodes map[int64]graph.Node + edges map[int64]map[int64]graph.WeightedEdge + + self, absent float64 + + nodeIDs uid.Set +} + +// NewWeightedUndirectedGraph returns an WeightedUndirectedGraph with the specified self and absent +// edge weight values. +func NewWeightedUndirectedGraph(self, absent float64) *WeightedUndirectedGraph { + return &WeightedUndirectedGraph{ + nodes: make(map[int64]graph.Node), + edges: make(map[int64]map[int64]graph.WeightedEdge), + + self: self, + absent: absent, + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *WeightedUndirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.edges[n.ID()] = make(map[int64]graph.WeightedEdge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedUndirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. 
+func (g *WeightedUndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { + return g.WeightedEdgeBetween(xid, yid) +} + +// Edges returns all the edges in the graph. +func (g *WeightedUndirectedGraph) Edges() graph.Edges { + if len(g.edges) == 0 { + return graph.Empty + } + var edges []graph.Edge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *WeightedUndirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.nodes[id]; !ok { + return graph.Empty + } + + nodes := make([]graph.Node, len(g.edges[id])) + i := 0 + for from := range g.edges[id] { + nodes[i] = g.nodes[from] + i++ + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *WeightedUndirectedGraph) HasEdgeBetween(xid, yid int64) bool { + _, ok := g.edges[xid][yid] + return ok +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *WeightedUndirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// NewWeightedEdge returns a new weighted edge from the source to the destination node. +func (g *WeightedUndirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { + return &WeightedEdge{F: from, T: to, W: weight} +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *WeightedUndirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *WeightedUndirectedGraph) Nodes() graph.Nodes { + if len(g.nodes) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *WeightedUndirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.edges[fid], tid) + delete(g.edges[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *WeightedUndirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.edges[id] { + delete(g.edges[from], id) + } + delete(g.edges, id) + + g.nodeIDs.Release(id) +} + +// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. 
+func (g *WeightedUndirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.edges[fid][tid] = e + g.edges[tid][fid] = e +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *WeightedUndirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if n, ok := g.edges[xid]; ok { + if e, ok := n[yid]; ok { + return e.Weight(), true + } + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedUndirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. +func (g *WeightedUndirectedGraph) WeightedEdgeBetween(xid, yid int64) graph.WeightedEdge { + edge, ok := g.edges[xid][yid] + if !ok { + return nil + } + if edge.From().ID() == xid { + return edge + } + return edge.ReversedEdge().(graph.WeightedEdge) +} + +// WeightedEdges returns all the weighted edges in the graph. +func (g *WeightedUndirectedGraph) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go new file mode 100644 index 00000000..83fdb5bd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go @@ -0,0 +1,250 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// DegeneracyOrdering returns the degeneracy ordering and the k-cores of +// the undirected graph g. +func DegeneracyOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) { + order, offsets := degeneracyOrdering(g) + + ordered.Reverse(order) + cores = make([][]graph.Node, len(offsets)) + offset := len(order) + for i, n := range offsets { + cores[i] = order[offset-n : offset] + offset -= n + } + return order, cores +} + +// KCore returns the k-core of the undirected graph g with nodes in an +// optimal ordering for the coloring number. 
+func KCore(k int, g graph.Undirected) []graph.Node { + order, offsets := degeneracyOrdering(g) + + var offset int + for _, n := range offsets[:k] { + offset += n + } + core := make([]graph.Node, len(order)-offset) + copy(core, order[offset:]) + return core +} + +// degeneracyOrdering is the common code for DegeneracyOrdering and KCore. It +// returns l, the nodes of g in optimal ordering for coloring number and +// s, a set of relative offsets into l for each k-core, where k is an index +// into s. +func degeneracyOrdering(g graph.Undirected) (l []graph.Node, s []int) { + nodes := graph.NodesOf(g.Nodes()) + + // The algorithm used here is essentially as described at + // http://en.wikipedia.org/w/index.php?title=Degeneracy_%28graph_theory%29&oldid=640308710 + + // Initialize an output list L in return parameters. + + // Compute a number d_v for each vertex v in G, + // the number of neighbors of v that are not already in L. + // Initially, these numbers are just the degrees of the vertices. + dv := make(map[int64]int, len(nodes)) + var ( + maxDegree int + neighbours = make(map[int64][]graph.Node) + ) + for _, n := range nodes { + id := n.ID() + adj := graph.NodesOf(g.From(id)) + neighbours[id] = adj + dv[id] = len(adj) + if len(adj) > maxDegree { + maxDegree = len(adj) + } + } + + // Initialize an array D such that D[i] contains a list of the + // vertices v that are not already in L for which d_v = i. + d := make([][]graph.Node, maxDegree+1) + for _, n := range nodes { + deg := dv[n.ID()] + d[deg] = append(d[deg], n) + } + + // Initialize k to 0. + k := 0 + // Repeat n times: + s = []int{0} + for range nodes { + // Scan the array cells D[0], D[1], ... until + // finding an i for which D[i] is nonempty. + var ( + i int + di []graph.Node + ) + for i, di = range d { + if len(di) != 0 { + break + } + } + + // Set k to max(k,i). + if i > k { + k = i + s = append(s, make([]int, k-len(s)+1)...) + } + + // Select a vertex v from D[i]. Add v to the + // beginning of L and remove it from D[i]. + var v graph.Node + v, d[i] = di[len(di)-1], di[:len(di)-1] + l = append(l, v) + s[k]++ + delete(dv, v.ID()) + + // For each neighbor w of v not already in L, + // subtract one from d_w and move w to the + // cell of D corresponding to the new value of d_w. + for _, w := range neighbours[v.ID()] { + dw, ok := dv[w.ID()] + if !ok { + continue + } + for i, n := range d[dw] { + if n.ID() == w.ID() { + d[dw][i], d[dw] = d[dw][len(d[dw])-1], d[dw][:len(d[dw])-1] + dw-- + d[dw] = append(d[dw], w) + break + } + } + dv[w.ID()] = dw + } + } + + return l, s +} + +// BronKerbosch returns the set of maximal cliques of the undirected graph g. 
+func BronKerbosch(g graph.Undirected) [][]graph.Node { + nodes := graph.NodesOf(g.Nodes()) + + // The algorithm used here is essentially BronKerbosch3 as described at + // http://en.wikipedia.org/w/index.php?title=Bron%E2%80%93Kerbosch_algorithm&oldid=656805858 + + p := set.NewNodesSize(len(nodes)) + for _, n := range nodes { + p.Add(n) + } + x := set.NewNodes() + var bk bronKerbosch + order, _ := degeneracyOrdering(g) + ordered.Reverse(order) + for _, v := range order { + neighbours := graph.NodesOf(g.From(v.ID())) + nv := set.NewNodesSize(len(neighbours)) + for _, n := range neighbours { + nv.Add(n) + } + bk.maximalCliquePivot(g, []graph.Node{v}, set.IntersectionOfNodes(p, nv), set.IntersectionOfNodes(x, nv)) + p.Remove(v) + x.Add(v) + } + return bk +} + +type bronKerbosch [][]graph.Node + +func (bk *bronKerbosch) maximalCliquePivot(g graph.Undirected, r []graph.Node, p, x set.Nodes) { + if len(p) == 0 && len(x) == 0 { + *bk = append(*bk, r) + return + } + + neighbours := bk.choosePivotFrom(g, p, x) + nu := set.NewNodesSize(len(neighbours)) + for _, n := range neighbours { + nu.Add(n) + } + for _, v := range p { + if nu.Has(v) { + continue + } + vid := v.ID() + neighbours := graph.NodesOf(g.From(vid)) + nv := set.NewNodesSize(len(neighbours)) + for _, n := range neighbours { + nv.Add(n) + } + + var found bool + for _, n := range r { + if n.ID() == vid { + found = true + break + } + } + var sr []graph.Node + if !found { + sr = append(r[:len(r):len(r)], v) + } + + bk.maximalCliquePivot(g, sr, set.IntersectionOfNodes(p, nv), set.IntersectionOfNodes(x, nv)) + p.Remove(v) + x.Add(v) + } +} + +func (*bronKerbosch) choosePivotFrom(g graph.Undirected, p, x set.Nodes) (neighbors []graph.Node) { + // TODO(kortschak): Investigate the impact of pivot choice that maximises + // |p ⋂ neighbours(u)| as a function of input size. Until then, leave as + // compile time option. + if !tomitaTanakaTakahashi { + for _, n := range p { + return graph.NodesOf(g.From(n.ID())) + } + for _, n := range x { + return graph.NodesOf(g.From(n.ID())) + } + panic("bronKerbosch: empty set") + } + + var ( + max = -1 + pivot graph.Node + ) + maxNeighbors := func(s set.Nodes) { + outer: + for _, u := range s { + nb := graph.NodesOf(g.From(u.ID())) + c := len(nb) + if c <= max { + continue + } + for n := range nb { + if _, ok := p[int64(n)]; ok { + continue + } + c-- + if c <= max { + continue outer + } + } + max = c + pivot = u + neighbors = nb + } + } + maxNeighbors(p) + maxNeighbors(x) + if pivot == nil { + panic("bronKerbosch: empty set") + } + return neighbors +} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go new file mode 100644 index 00000000..28f1b96e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go @@ -0,0 +1,111 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// Builder is a pure topological graph construction type. +type Builder interface { + AddNode(graph.Node) + SetEdge(graph.Edge) +} + +// CliqueGraph builds the clique graph of g in dst using Clique and CliqueGraphEdge +// nodes and edges. 
The nodes returned by calls to Nodes on the nodes and edges of +// the constructed graph are the cliques and the common nodes between cliques +// respectively. The dst graph is not cleared. +func CliqueGraph(dst Builder, g graph.Undirected) { + cliques := BronKerbosch(g) + + // Construct a consistent view of cliques in g. Sorting costs + // us a little, but not as much as the cliques themselves. + for _, c := range cliques { + sort.Sort(ordered.ByID(c)) + } + sort.Sort(ordered.BySliceIDs(cliques)) + + cliqueNodes := make(cliqueNodeSets, len(cliques)) + for id, c := range cliques { + s := set.NewNodesSize(len(c)) + for _, n := range c { + s.Add(n) + } + ns := &nodeSet{Clique: Clique{id: int64(id), nodes: c}, nodes: s} + dst.AddNode(ns.Clique) + for _, n := range c { + nid := n.ID() + cliqueNodes[nid] = append(cliqueNodes[nid], ns) + } + } + + for _, cliques := range cliqueNodes { + for i, uc := range cliques { + for _, vc := range cliques[i+1:] { + // Retain the nodes that contribute to the + // edge between the cliques. + var edgeNodes []graph.Node + switch 1 { + case len(uc.Clique.nodes): + edgeNodes = []graph.Node{uc.Clique.nodes[0]} + case len(vc.Clique.nodes): + edgeNodes = []graph.Node{vc.Clique.nodes[0]} + default: + for _, n := range set.IntersectionOfNodes(uc.nodes, vc.nodes) { + edgeNodes = append(edgeNodes, n) + } + sort.Sort(ordered.ByID(edgeNodes)) + } + + dst.SetEdge(CliqueGraphEdge{from: uc.Clique, to: vc.Clique, nodes: edgeNodes}) + } + } + } +} + +type cliqueNodeSets map[int64][]*nodeSet + +type nodeSet struct { + Clique + nodes set.Nodes +} + +// Clique is a node in a clique graph. +type Clique struct { + id int64 + nodes []graph.Node +} + +// ID returns the node ID. +func (n Clique) ID() int64 { return n.id } + +// Nodes returns the nodes in the clique. +func (n Clique) Nodes() []graph.Node { return n.nodes } + +// CliqueGraphEdge is an edge in a clique graph. +type CliqueGraphEdge struct { + from, to Clique + nodes []graph.Node +} + +// From returns the from node of the edge. +func (e CliqueGraphEdge) From() graph.Node { return e.from } + +// To returns the to node of the edge. +func (e CliqueGraphEdge) To() graph.Node { return e.to } + +// ReversedEdge returns a new CliqueGraphEdge with +// the edge end points swapped. The nodes of the +// new edge are shared with the receiver. +func (e CliqueGraphEdge) ReversedEdge() graph.Edge { e.from, e.to = e.to, e.from; return e } + +// Nodes returns the common nodes in the cliques of the underlying graph +// corresponding to the from and to nodes in the clique graph. +func (e CliqueGraphEdge) Nodes() []graph.Node { return e.nodes } diff --git a/vendor/gonum.org/v1/gonum/graph/topo/doc.go b/vendor/gonum.org/v1/gonum/graph/topo/doc.go new file mode 100644 index 00000000..cbcdff1e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package topo provides graph topology analysis functions. +package topo // import "gonum.org/v1/gonum/graph/topo" diff --git a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go new file mode 100644 index 00000000..8a78ba2f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go @@ -0,0 +1,285 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" + "gonum.org/v1/gonum/graph/iterator" +) + +// johnson implements Johnson's "Finding all the elementary +// circuits of a directed graph" algorithm. SIAM J. Comput. 4(1):1975. +// +// Comments in the johnson methods are kept in sync with the comments +// and labels from the paper. +type johnson struct { + adjacent johnsonGraph // SCC adjacency list. + b []set.Ints // Johnson's "B-list". + blocked []bool + s int + + stack []graph.Node + + result [][]graph.Node +} + +// DirectedCyclesIn returns the set of elementary cycles in the graph g. +func DirectedCyclesIn(g graph.Directed) [][]graph.Node { + jg := johnsonGraphFrom(g) + j := johnson{ + adjacent: jg, + b: make([]set.Ints, len(jg.orig)), + blocked: make([]bool, len(jg.orig)), + } + + // len(j.nodes) is the order of g. + for j.s < len(j.adjacent.orig)-1 { + // We use the previous SCC adjacency to reduce the work needed. + sccs := TarjanSCC(j.adjacent.subgraph(j.s)) + // A_k = adjacency structure of strong component K with least + // vertex in subgraph of G induced by {s, s+1, ... ,n}. + j.adjacent = j.adjacent.sccSubGraph(sccs, 2) // Only allow SCCs with >= 2 vertices. + if j.adjacent.order() == 0 { + break + } + + // s = least vertex in V_k + if s := j.adjacent.leastVertexIndex(); s < j.s { + j.s = s + } + for i, v := range j.adjacent.orig { + if !j.adjacent.nodes.Has(v.ID()) { + continue + } + if len(j.adjacent.succ[v.ID()]) > 0 { + j.blocked[i] = false + j.b[i] = make(set.Ints) + } + } + //L3: + _ = j.circuit(j.s) + j.s++ + } + + return j.result +} + +// circuit is the CIRCUIT sub-procedure in the paper. +func (j *johnson) circuit(v int) bool { + f := false + n := j.adjacent.orig[v] + j.stack = append(j.stack, n) + j.blocked[v] = true + + //L1: + for w := range j.adjacent.succ[n.ID()] { + w := j.adjacent.indexOf(w) + if w == j.s { + // Output circuit composed of stack followed by s. + r := make([]graph.Node, len(j.stack)+1) + copy(r, j.stack) + r[len(r)-1] = j.adjacent.orig[j.s] + j.result = append(j.result, r) + f = true + } else if !j.blocked[w] { + if j.circuit(w) { + f = true + } + } + } + + //L2: + if f { + j.unblock(v) + } else { + for w := range j.adjacent.succ[n.ID()] { + j.b[j.adjacent.indexOf(w)].Add(v) + } + } + j.stack = j.stack[:len(j.stack)-1] + + return f +} + +// unblock is the UNBLOCK sub-procedure in the paper. +func (j *johnson) unblock(u int) { + j.blocked[u] = false + for w := range j.b[u] { + j.b[u].Remove(w) + if j.blocked[w] { + j.unblock(w) + } + } +} + +// johnsonGraph is an edge list representation of a graph with helpers +// necessary for Johnson's algorithm +type johnsonGraph struct { + // Keep the original graph nodes and a + // look-up to into the non-sparse + // collection of potentially sparse IDs. + orig []graph.Node + index map[int64]int + + nodes set.Int64s + succ map[int64]set.Int64s +} + +// johnsonGraphFrom returns a deep copy of the graph g. 
+func johnsonGraphFrom(g graph.Directed) johnsonGraph { + nodes := graph.NodesOf(g.Nodes()) + sort.Sort(ordered.ByID(nodes)) + c := johnsonGraph{ + orig: nodes, + index: make(map[int64]int, len(nodes)), + + nodes: make(set.Int64s, len(nodes)), + succ: make(map[int64]set.Int64s), + } + for i, u := range nodes { + uid := u.ID() + c.index[uid] = i + for _, v := range graph.NodesOf(g.From(uid)) { + if c.succ[uid] == nil { + c.succ[uid] = make(set.Int64s) + c.nodes.Add(uid) + } + c.nodes.Add(v.ID()) + c.succ[uid].Add(v.ID()) + } + } + return c +} + +// order returns the order of the graph. +func (g johnsonGraph) order() int { return g.nodes.Count() } + +// indexOf returns the index of the retained node for the given node ID. +func (g johnsonGraph) indexOf(id int64) int { + return g.index[id] +} + +// leastVertexIndex returns the index into orig of the least vertex. +func (g johnsonGraph) leastVertexIndex() int { + for _, v := range g.orig { + if g.nodes.Has(v.ID()) { + return g.indexOf(v.ID()) + } + } + panic("johnsonCycles: empty set") +} + +// subgraph returns a subgraph of g induced by {s, s+1, ... , n}. The +// subgraph is destructively generated in g. +func (g johnsonGraph) subgraph(s int) johnsonGraph { + sn := g.orig[s].ID() + for u, e := range g.succ { + if u < sn { + g.nodes.Remove(u) + delete(g.succ, u) + continue + } + for v := range e { + if v < sn { + g.succ[u].Remove(v) + } + } + } + return g +} + +// sccSubGraph returns the graph of the tarjan's strongly connected +// components with each SCC containing at least min vertices. +// sccSubGraph returns nil if there is no SCC with at least min +// members. +func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { + if len(g.nodes) == 0 { + g.nodes = nil + g.succ = nil + return g + } + sub := johnsonGraph{ + orig: g.orig, + index: g.index, + nodes: make(set.Int64s), + succ: make(map[int64]set.Int64s), + } + + var n int + for _, scc := range sccs { + if len(scc) < min { + continue + } + n++ + for _, u := range scc { + for _, v := range scc { + if _, ok := g.succ[u.ID()][v.ID()]; ok { + if sub.succ[u.ID()] == nil { + sub.succ[u.ID()] = make(set.Int64s) + sub.nodes.Add(u.ID()) + } + sub.nodes.Add(v.ID()) + sub.succ[u.ID()].Add(v.ID()) + } + } + } + } + if n == 0 { + g.nodes = nil + g.succ = nil + return g + } + + return sub +} + +// Nodes is required to satisfy Tarjan. +func (g johnsonGraph) Nodes() graph.Nodes { + n := make([]graph.Node, 0, len(g.nodes)) + for id := range g.nodes { + n = append(n, johnsonGraphNode(id)) + } + return iterator.NewOrderedNodes(n) +} + +// Successors is required to satisfy Tarjan. 
+func (g johnsonGraph) From(id int64) graph.Nodes { + adj := g.succ[id] + if len(adj) == 0 { + return graph.Empty + } + succ := make([]graph.Node, 0, len(adj)) + for id := range adj { + succ = append(succ, johnsonGraphNode(id)) + } + return iterator.NewOrderedNodes(succ) +} + +func (johnsonGraph) Has(int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) Node(int64) graph.Node { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) HasEdgeBetween(_, _ int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) Edge(_, _ int64) graph.Edge { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) HasEdgeFromTo(_, _ int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) To(int64) graph.Nodes { + panic("topo: unintended use of johnsonGraph") +} + +type johnsonGraphNode int64 + +func (n johnsonGraphNode) ID() int64 { return int64(n) } diff --git a/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go b/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go new file mode 100644 index 00000000..36171d6f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go @@ -0,0 +1,9 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !tomita + +package topo + +const tomitaTanakaTakahashi = false diff --git a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go new file mode 100644 index 00000000..44b362a6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go @@ -0,0 +1,83 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/linear" + "gonum.org/v1/gonum/graph/internal/set" +) + +// UndirectedCyclesIn returns a set of cycles that forms a cycle basis in the graph g. +// Any cycle in g can be constructed as a symmetric difference of its elements. 
+func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { + // From "An algorithm for finding a fundamental set of cycles of a graph" + // https://doi.org/10.1145/363219.363232 + + var cycles [][]graph.Node + done := make(set.Int64s) + var tree linear.NodeStack + nodes := g.Nodes() + for nodes.Next() { + n := nodes.Node() + id := n.ID() + if done.Has(id) { + continue + } + done.Add(id) + + tree = tree[:0] + tree.Push(n) + from := sets{id: set.Int64s{}} + to := map[int64]graph.Node{id: n} + + for tree.Len() != 0 { + u := tree.Pop() + uid := u.ID() + adj := from[uid] + for _, v := range graph.NodesOf(g.From(uid)) { + vid := v.ID() + switch { + case uid == vid: + cycles = append(cycles, []graph.Node{u}) + case !from.has(vid): + done.Add(vid) + to[vid] = u + tree.Push(v) + from.add(uid, vid) + case !adj.Has(vid): + c := []graph.Node{v, u} + adj := from[vid] + p := to[uid] + for !adj.Has(p.ID()) { + c = append(c, p) + p = to[p.ID()] + } + c = append(c, p, c[0]) + cycles = append(cycles, c) + adj.Add(uid) + } + } + } + } + + return cycles +} + +type sets map[int64]set.Int64s + +func (s sets) add(uid, vid int64) { + e, ok := s[vid] + if !ok { + e = make(set.Int64s) + s[vid] = e + } + e.Add(uid) +} + +func (s sets) has(uid int64) bool { + _, ok := s[uid] + return ok +} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go new file mode 100644 index 00000000..64712927 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go @@ -0,0 +1,199 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "fmt" + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// Unorderable is an error containing sets of unorderable graph.Nodes. +type Unorderable [][]graph.Node + +// Error satisfies the error interface. +func (e Unorderable) Error() string { + const maxNodes = 10 + var n int + for _, c := range e { + n += len(c) + } + if n > maxNodes { + // Don't return errors that are too long. + return fmt.Sprintf("topo: no topological ordering: %d nodes in %d cyclic components", n, len(e)) + } + return fmt.Sprintf("topo: no topological ordering: cyclic components: %v", [][]graph.Node(e)) +} + +func lexical(nodes []graph.Node) { sort.Sort(ordered.ByID(nodes)) } + +// Sort performs a topological sort of the directed graph g returning the 'from' to 'to' +// sort order. If a topological ordering is not possible, an Unorderable error is returned +// listing cyclic components in g with each cyclic component's members sorted by ID. When +// an Unorderable error is returned, each cyclic component's topological position within +// the sorted nodes is marked with a nil graph.Node. +func Sort(g graph.Directed) (sorted []graph.Node, err error) { + sccs := TarjanSCC(g) + return sortedFrom(sccs, lexical) +} + +// SortStabilized performs a topological sort of the directed graph g returning the 'from' +// to 'to' sort order, or the order defined by the in place order sort function where there +// is no unambiguous topological ordering. If a topological ordering is not possible, an +// Unorderable error is returned listing cyclic components in g with each cyclic component's +// members sorted by the provided order function. If order is nil, nodes are ordered lexically +// by node ID. 
When an Unorderable error is returned, each cyclic component's topological +// position within the sorted nodes is marked with a nil graph.Node. +func SortStabilized(g graph.Directed, order func([]graph.Node)) (sorted []graph.Node, err error) { + if order == nil { + order = lexical + } + sccs := tarjanSCCstabilized(g, order) + return sortedFrom(sccs, order) +} + +func sortedFrom(sccs [][]graph.Node, order func([]graph.Node)) ([]graph.Node, error) { + sorted := make([]graph.Node, 0, len(sccs)) + var sc Unorderable + for _, s := range sccs { + if len(s) != 1 { + order(s) + sc = append(sc, s) + sorted = append(sorted, nil) + continue + } + sorted = append(sorted, s[0]) + } + var err error + if sc != nil { + for i, j := 0, len(sc)-1; i < j; i, j = i+1, j-1 { + sc[i], sc[j] = sc[j], sc[i] + } + err = sc + } + ordered.Reverse(sorted) + return sorted, err +} + +// TarjanSCC returns the strongly connected components of the graph g using Tarjan's algorithm. +// +// A strongly connected component of a graph is a set of vertices where it's possible to reach any +// vertex in the set from any other (meaning there's a cycle between them.) +// +// Generally speaking, a directed graph where the number of strongly connected components is equal +// to the number of nodes is acyclic, unless you count reflexive edges as a cycle (which requires +// only a little extra testing.) +// +func TarjanSCC(g graph.Directed) [][]graph.Node { + return tarjanSCCstabilized(g, nil) +} + +func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.Node { + nodes := graph.NodesOf(g.Nodes()) + var succ func(id int64) []graph.Node + if order == nil { + succ = func(id int64) []graph.Node { + return graph.NodesOf(g.From(id)) + } + } else { + order(nodes) + ordered.Reverse(nodes) + + succ = func(id int64) []graph.Node { + to := graph.NodesOf(g.From(id)) + order(to) + ordered.Reverse(to) + return to + } + } + + t := tarjan{ + succ: succ, + + indexTable: make(map[int64]int, len(nodes)), + lowLink: make(map[int64]int, len(nodes)), + onStack: make(set.Int64s), + } + for _, v := range nodes { + if t.indexTable[v.ID()] == 0 { + t.strongconnect(v) + } + } + return t.sccs +} + +// tarjan implements Tarjan's strongly connected component finding +// algorithm. The implementation is from the pseudocode at +// +// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm?oldid=642744644 +// +type tarjan struct { + succ func(id int64) []graph.Node + + index int + indexTable map[int64]int + lowLink map[int64]int + onStack set.Int64s + + stack []graph.Node + + sccs [][]graph.Node +} + +// strongconnect is the strongconnect function described in the +// wikipedia article. +func (t *tarjan) strongconnect(v graph.Node) { + vID := v.ID() + + // Set the depth index for v to the smallest unused index. + t.index++ + t.indexTable[vID] = t.index + t.lowLink[vID] = t.index + t.stack = append(t.stack, v) + t.onStack.Add(vID) + + // Consider successors of v. + for _, w := range t.succ(vID) { + wID := w.ID() + if t.indexTable[wID] == 0 { + // Successor w has not yet been visited; recur on it. + t.strongconnect(w) + t.lowLink[vID] = min(t.lowLink[vID], t.lowLink[wID]) + } else if t.onStack.Has(wID) { + // Successor w is in stack s and hence in the current SCC. + t.lowLink[vID] = min(t.lowLink[vID], t.indexTable[wID]) + } + } + + // If v is a root node, pop the stack and generate an SCC. + if t.lowLink[vID] == t.indexTable[vID] { + // Start a new strongly connected component. 
+ var ( + scc []graph.Node + w graph.Node + ) + for { + w, t.stack = t.stack[len(t.stack)-1], t.stack[:len(t.stack)-1] + t.onStack.Remove(w.ID()) + // Add w to current strongly connected component. + scc = append(scc, w) + if w.ID() == vID { + break + } + } + // Output the current strongly connected component. + t.sccs = append(t.sccs, scc) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go b/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go new file mode 100644 index 00000000..f85a0d6c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go @@ -0,0 +1,9 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build tomita + +package topo + +const tomitaTanakaTakahashi = true diff --git a/vendor/gonum.org/v1/gonum/graph/topo/topo.go b/vendor/gonum.org/v1/gonum/graph/topo/topo.go new file mode 100644 index 00000000..bece61a6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/topo.go @@ -0,0 +1,68 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/traverse" +) + +// IsPathIn returns whether path is a path in g. +// +// As special cases, IsPathIn returns true for a zero length path or for +// a path of length 1 when the node in path exists in the graph. +func IsPathIn(g graph.Graph, path []graph.Node) bool { + switch len(path) { + case 0: + return true + case 1: + return g.Node(path[0].ID()) != nil + default: + var canReach func(uid, vid int64) bool + switch g := g.(type) { + case graph.Directed: + canReach = g.HasEdgeFromTo + default: + canReach = g.HasEdgeBetween + } + + for i, u := range path[:len(path)-1] { + if !canReach(u.ID(), path[i+1].ID()) { + return false + } + } + return true + } +} + +// PathExistsIn returns whether there is a path in g starting at from extending +// to to. +// +// PathExistsIn exists as a helper function. If many tests for path existence +// are being performed, other approaches will be more efficient. +func PathExistsIn(g graph.Graph, from, to graph.Node) bool { + var t traverse.BreadthFirst + return t.Walk(g, from, func(n graph.Node, _ int) bool { return n.ID() == to.ID() }) != nil +} + +// ConnectedComponents returns the connected components of the undirected graph g. +func ConnectedComponents(g graph.Undirected) [][]graph.Node { + var ( + w traverse.DepthFirst + c []graph.Node + cc [][]graph.Node + ) + during := func(n graph.Node) { + c = append(c, n) + } + after := func() { + cc = append(cc, []graph.Node(nil)) + cc[len(cc)-1] = append(cc[len(cc)-1], c...) + c = c[:0] + } + w.WalkAll(g, nil, after, during) + + return cc +} diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/doc.go b/vendor/gonum.org/v1/gonum/graph/traverse/doc.go new file mode 100644 index 00000000..dc98bbf4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/traverse/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package traverse provides basic graph traversal primitives. 
+package traverse // import "gonum.org/v1/gonum/graph/traverse" diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go new file mode 100644 index 00000000..125b1611 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go @@ -0,0 +1,231 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package traverse + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/linear" + "gonum.org/v1/gonum/graph/internal/set" +) + +var _ Graph = graph.Graph(nil) + +// Graph is the subset of graph.Graph necessary for graph traversal. +type Graph interface { + // From returns all nodes that can be reached directly + // from the node with the given ID. + From(id int64) graph.Nodes + + // Edge returns the edge from u to v, with IDs uid and vid, + // if such an edge exists and nil otherwise. The node v + // must be directly reachable from u as defined by + // the From method. + Edge(uid, vid int64) graph.Edge +} + +// BreadthFirst implements stateful breadth-first graph traversal. +type BreadthFirst struct { + // Visit is called on all nodes on their first visit. + Visit func(graph.Node) + + // Traverse is called on all edges that may be traversed + // during the walk. This includes edges that would hop to + // an already visited node. + // + // The value returned by Traverse determines whether + // an edge can be traversed during the walk. + Traverse func(graph.Edge) bool + + queue linear.NodeQueue + visited set.Int64s +} + +// Walk performs a breadth-first traversal of the graph g starting from the given node, +// depending on the Traverse field and the until parameter if they are non-nil. +// The traversal follows edges for which Traverse(edge) is true and returns the first node +// for which until(node, depth) is true. During the traversal, if the Visit field is +// non-nil, it is called with each node the first time it is visited. +func (b *BreadthFirst) Walk(g Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node { + if b.visited == nil { + b.visited = make(set.Int64s) + } + b.queue.Enqueue(from) + if b.Visit != nil && !b.visited.Has(from.ID()) { + b.Visit(from) + } + b.visited.Add(from.ID()) + + var ( + depth int + children int + untilNext = 1 + ) + for b.queue.Len() > 0 { + t := b.queue.Dequeue() + if until != nil && until(t, depth) { + return t + } + tid := t.ID() + to := g.From(tid) + for to.Next() { + n := to.Node() + nid := n.ID() + if b.Traverse != nil && !b.Traverse(g.Edge(tid, nid)) { + continue + } + if b.visited.Has(nid) { + continue + } + if b.Visit != nil { + b.Visit(n) + } + b.visited.Add(nid) + children++ + b.queue.Enqueue(n) + } + if untilNext--; untilNext == 0 { + depth++ + untilNext = children + children = 0 + } + } + + return nil +} + +// WalkAll calls Walk for each unvisited node of the graph g using edges independent +// of their direction. The functions before and after are called prior to commencing +// and after completing each walk if they are non-nil respectively. The function +// during is called on each node as it is traversed. 
+func (b *BreadthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) { + b.Reset() + nodes := g.Nodes() + for nodes.Next() { + from := nodes.Node() + if b.Visited(from) { + continue + } + if before != nil { + before() + } + b.Walk(g, from, func(n graph.Node, _ int) bool { + if during != nil { + during(n) + } + return false + }) + if after != nil { + after() + } + } +} + +// Visited returned whether the node n was visited during a traverse. +func (b *BreadthFirst) Visited(n graph.Node) bool { + return b.visited.Has(n.ID()) +} + +// Reset resets the state of the traverser for reuse. +func (b *BreadthFirst) Reset() { + b.queue.Reset() + b.visited = nil +} + +// DepthFirst implements stateful depth-first graph traversal. +type DepthFirst struct { + // Visit is called on all nodes on their first visit. + Visit func(graph.Node) + + // Traverse is called on all edges that may be traversed + // during the walk. This includes edges that would hop to + // an already visited node. + // + // The value returned by Traverse determines whether an + // edge can be traversed during the walk. + Traverse func(graph.Edge) bool + + stack linear.NodeStack + visited set.Int64s +} + +// Walk performs a depth-first traversal of the graph g starting from the given node, +// depending on the Traverse field and the until parameter if they are non-nil. +// The traversal follows edges for which Traverse(edge) is true and returns the first node +// for which until(node) is true. During the traversal, if the Visit field is non-nil, it +// is called with each node the first time it is visited. +func (d *DepthFirst) Walk(g Graph, from graph.Node, until func(graph.Node) bool) graph.Node { + if d.visited == nil { + d.visited = make(set.Int64s) + } + d.stack.Push(from) + if d.Visit != nil && !d.visited.Has(from.ID()) { + d.Visit(from) + } + d.visited.Add(from.ID()) + + for d.stack.Len() > 0 { + t := d.stack.Pop() + if until != nil && until(t) { + return t + } + tid := t.ID() + to := g.From(tid) + for to.Next() { + n := to.Node() + nid := n.ID() + if d.Traverse != nil && !d.Traverse(g.Edge(tid, nid)) { + continue + } + if d.visited.Has(nid) { + continue + } + if d.Visit != nil { + d.Visit(n) + } + d.visited.Add(nid) + d.stack.Push(n) + } + } + + return nil +} + +// WalkAll calls Walk for each unvisited node of the graph g using edges independent +// of their direction. The functions before and after are called prior to commencing +// and after completing each walk if they are non-nil respectively. The function +// during is called on each node as it is traversed. +func (d *DepthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) { + d.Reset() + nodes := g.Nodes() + for nodes.Next() { + from := nodes.Node() + if d.Visited(from) { + continue + } + if before != nil { + before() + } + d.Walk(g, from, func(n graph.Node) bool { + if during != nil { + during(n) + } + return false + }) + if after != nil { + after() + } + } +} + +// Visited returned whether the node n was visited during a traverse. +func (d *DepthFirst) Visited(n graph.Node) bool { + return d.visited.Has(n.ID()) +} + +// Reset resets the state of the traverser for reuse. 
+func (d *DepthFirst) Reset() { + d.stack = d.stack[:0] + d.visited = nil +} diff --git a/vendor/gonum.org/v1/gonum/graph/undirect.go b/vendor/gonum.org/v1/gonum/graph/undirect.go new file mode 100644 index 00000000..07ce64a0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/undirect.go @@ -0,0 +1,270 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Undirect converts a directed graph to an undirected graph. +type Undirect struct { + G Directed +} + +var _ Undirected = Undirect{} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g Undirect) Node(id int64) Node { return g.G.Node(id) } + +// Nodes returns all the nodes in the graph. +func (g Undirect) Nodes() Nodes { return g.G.Nodes() } + +// From returns all nodes in g that can be reached directly from u. +func (g Undirect) From(uid int64) Nodes { + return newNodeFilterIterator(g.G.From(uid), g.G.To(uid)) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g Undirect) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g Undirect) Edge(uid, vid int64) Edge { return g.EdgeBetween(uid, vid) } + +// EdgeBetween returns the edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g Undirect) EdgeBetween(xid, yid int64) Edge { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + if fe == nil && re == nil { + return nil + } + + return EdgePair{fe, re} +} + +// UndirectWeighted converts a directed weighted graph to an undirected weighted graph, +// resolving edge weight conflicts. +type UndirectWeighted struct { + G WeightedDirected + + // Absent is the value used to + // represent absent edge weights + // passed to Merge if the reverse + // edge is present. + Absent float64 + + // Merge defines how discordant edge + // weights in G are resolved. A merge + // is performed if at least one edge + // exists between the nodes being + // considered. The edges corresponding + // to the two weights are also passed, + // in the same order. + // The order of weight parameters + // passed to Merge is not defined, so + // the function should be commutative. + // If Merge is nil, the arithmetic + // mean is used to merge weights. + Merge func(x, y float64, xe, ye Edge) float64 +} + +var ( + _ Undirected = UndirectWeighted{} + _ WeightedUndirected = UndirectWeighted{} +) + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g UndirectWeighted) Node(id int64) Node { return g.G.Node(id) } + +// Nodes returns all the nodes in the graph. +func (g UndirectWeighted) Nodes() Nodes { return g.G.Nodes() } + +// From returns all nodes in g that can be reached directly from u. +func (g UndirectWeighted) From(uid int64) Nodes { + return newNodeFilterIterator(g.G.From(uid), g.G.To(uid)) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. 
+func (g UndirectWeighted) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g UndirectWeighted) Edge(uid, vid int64) Edge { return g.WeightedEdgeBetween(uid, vid) } + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g UndirectWeighted) WeightedEdge(uid, vid int64) WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g UndirectWeighted) EdgeBetween(xid, yid int64) Edge { + return g.WeightedEdgeBetween(xid, yid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g UndirectWeighted) WeightedEdgeBetween(xid, yid int64) WeightedEdge { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + if fe == nil && re == nil { + return nil + } + + f, ok := g.G.Weight(xid, yid) + if !ok { + f = g.Absent + } + r, ok := g.G.Weight(yid, xid) + if !ok { + r = g.Absent + } + + var w float64 + if g.Merge == nil { + w = (f + r) / 2 + } else { + w = g.Merge(f, r, fe, re) + } + return WeightedEdgePair{EdgePair: [2]Edge{fe, re}, W: w} +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node the internal node weight is returned. If there is no joining +// edge between the two nodes the weight value returned is zero. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g UndirectWeighted) Weight(xid, yid int64) (w float64, ok bool) { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + + f, fOk := g.G.Weight(xid, yid) + if !fOk { + f = g.Absent + } + r, rOK := g.G.Weight(yid, xid) + if !rOK { + r = g.Absent + } + ok = fOk || rOK + + if g.Merge == nil { + return (f + r) / 2, ok + } + return g.Merge(f, r, fe, re), ok +} + +// EdgePair is an opposed pair of directed edges. +type EdgePair [2]Edge + +// From returns the from node of the first non-nil edge, or nil. +func (e EdgePair) From() Node { + if e[0] != nil { + return e[0].From() + } else if e[1] != nil { + return e[1].From() + } + return nil +} + +// To returns the to node of the first non-nil edge, or nil. +func (e EdgePair) To() Node { + if e[0] != nil { + return e[0].To() + } else if e[1] != nil { + return e[1].To() + } + return nil +} + +// ReversedEdge returns a new Edge with the end point of the +// edges in the pair swapped. 
+func (e EdgePair) ReversedEdge() Edge { + if e[0] != nil { + e[0] = e[0].ReversedEdge() + } + if e[1] != nil { + e[1] = e[1].ReversedEdge() + } + return e +} + +// WeightedEdgePair is an opposed pair of directed edges. +type WeightedEdgePair struct { + EdgePair + W float64 +} + +// ReversedEdge returns a new Edge with the end point of the +// edges in the pair swapped. +func (e WeightedEdgePair) ReversedEdge() Edge { + e.EdgePair = e.EdgePair.ReversedEdge().(EdgePair) + return e +} + +// Weight returns the merged edge weights of the two edges. +func (e WeightedEdgePair) Weight() float64 { return e.W } + +// nodeFilterIterator combines two Nodes to produce a single stream of +// unique nodes. +type nodeFilterIterator struct { + a, b Nodes + + // unique indicates the node in b with the key ID is unique. + unique map[int64]bool +} + +func newNodeFilterIterator(a, b Nodes) *nodeFilterIterator { + n := nodeFilterIterator{a: a, b: b, unique: make(map[int64]bool)} + for n.b.Next() { + n.unique[n.b.Node().ID()] = true + } + n.b.Reset() + for n.a.Next() { + n.unique[n.a.Node().ID()] = false + } + n.a.Reset() + return &n +} + +func (n *nodeFilterIterator) Len() int { + return len(n.unique) +} + +func (n *nodeFilterIterator) Next() bool { + n.Len() + if n.a.Next() { + return true + } + for n.b.Next() { + if n.unique[n.b.Node().ID()] { + return true + } + } + return false +} + +func (n *nodeFilterIterator) Node() Node { + if n.a.Len() != 0 { + return n.a.Node() + } + return n.b.Node() +} + +func (n *nodeFilterIterator) Reset() { + n.a.Reset() + n.b.Reset() +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s new file mode 100644 index 00000000..0a4c14c2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s @@ -0,0 +1,134 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SI // SI = &x + MOVQ y_base+40(FP), DI // DI = &y + MOVQ n+64(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+88(FP), R8 // R8 = ix // Load the first index + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ iy+96(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + LEAQ (SI)(R8*1), SI // SI = &(x[ix]) + LEAQ (DI)(R9*1), DI // DI = &(y[iy]) + MOVQ DI, DX // DX = DI // Separate Read/Write pointers + MOVQ incX+72(FP), R8 // R8 = incX + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ incY+80(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SI)(R8*1), X4 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVUPS (SI), X6 + MOVUPS (SI)(R8*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + ADDPD (DX)(R9*1), X5 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDPD (DX), X7 + ADDPD (DX)(R9*1), X9 + MOVUPS X3, (DI) // dst[i] = X_(i+1) + MOVUPS X5, (DI)(R9*1) + LEAQ (DI)(R9*2), DI + MOVUPS X7, (DI) + MOVUPS X9, (DI)(R9*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) 
* imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI), X3 + MOVUPS X3, (DI) // y[i] = X_i + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s new file mode 100644 index 00000000..cb57f4be --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s @@ -0,0 +1,141 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+56(FP), SI // SI = &x + MOVQ y_base+80(FP), DX // DX = &y + MOVQ n+104(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+128(FP), R8 // R8 = ix // Load the first index + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ iy+136(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVQ idst+32(FP), R10 // R10 = idst + SHLQ $4, R10 // R10 *= sizeof(complex128) + LEAQ (SI)(R8*1), SI // SI = &(x[ix]) + LEAQ (DX)(R9*1), DX // DX = &(y[iy]) + LEAQ (DI)(R10*1), DI // DI = &(dst[idst]) + MOVQ incX+112(FP), R8 // R8 = incX + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ incY+120(FP), R9 // R9 = incY + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVQ incDst+24(FP), R10 // R10 = incDst + SHLQ $4, R10 // R10 *= sizeof(complex128) + MOVUPS alpha+40(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SI)(R8*1), X4 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + + MOVUPS (SI), X6 + MOVUPS (SI)(R8*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + 
// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + ADDPD (DX)(R9*1), X5 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDPD (DX), X7 + ADDPD (DX)(R9*1), X9 + MOVUPS X3, (DI) // dst[i] = X_(i+1) + MOVUPS X5, (DI)(R10*1) + LEAQ (DI)(R10*2), DI + MOVUPS X7, (DI) + MOVUPS X9, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + MOVUPS X3, (DI) // y[i] X_(i+1) + ADDQ R8, SI // SI += incX + ADDQ R9, DX // DX += incY + ADDQ R10, DI // DI += incDst + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s new file mode 100644 index 00000000..f1fddce7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s @@ -0,0 +1,122 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitary(alpha complex128, x, y []complex128) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SI // SI = &x + MOVQ y_base+40(FP), DI // DI = &y + MOVQ x_len+24(FP), CX // CX = min( len(x), len(y) ) + CMPQ y_len+48(FP), CX + CMOVQLE y_len+48(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + PXOR X0, X0 // Clear work registers and cache-align loop + PXOR X1, X1 + MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SI)(AX*8), X4 + MOVUPS 32(SI)(AX*8), X6 + MOVUPS 48(SI)(AX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI)(AX*8), X3 + ADDPD 16(DI)(AX*8), X5 + ADDPD 32(DI)(AX*8), X7 + ADDPD 48(DI)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX + JNZ caxy_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI)(AX*8), X3 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + ADDQ $2, AX // i += 2 + LOOP caxy_tail // } while --CX > 0 + +caxy_end: 
+ RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s new file mode 100644 index 00000000..b80015fd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s @@ -0,0 +1,123 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitaryTo(dst []complex128, alpha complex64, x, y []complex128) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+40(FP), SI // SI = &x + MOVQ y_base+64(FP), DX // DX = &y + MOVQ x_len+48(FP), CX // CX = min( len(x), len(y), len(dst) ) + CMPQ y_len+72(FP), CX + CMOVQLE y_len+72(FP), CX + CMPQ dst_len+8(FP), CX + CMOVQLE dst_len+8(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + MOVUPS alpha+24(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SI)(AX*8), X4 + MOVUPS 32(SI)(AX*8), X6 + MOVUPS 48(SI)(AX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 // Load and duplicate imag elements (xi, xi) + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 // duplicate real elements (xr, xr) + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX)(AX*8), X3 + ADDPD 16(DX)(AX*8), X5 + ADDPD 32(DX)(AX*8), X7 + ADDPD 48(DX)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX + JNZ caxy_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // Same calculation, but read in values to avoid trampling memory + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), 
real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX)(AX*8), X3 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + ADDQ $2, AX // i += 2 + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go new file mode 100644 index 00000000..8802ff13 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package c128 provides complex128 vector primitives. +package c128 // import "gonum.org/v1/gonum/internal/asm/c128" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s new file mode 100644 index 00000000..301d294f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s @@ -0,0 +1,153 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3 +#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5 +#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7 +#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9 + +#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2 +#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4 +#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6 +#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) +TEXT ·DotcInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128) + SHLQ $4, INC_X + MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128) + SHLQ $4, INC_Y + LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = 
incX + SHLQ $4, INC_X // INC_X *= sizeof(complex128) + MOVQ incY+64(FP), INC_Y // INC_Y = incY + SHLQ $4, INC_Y // INC_Y *= sizeof(complex128) + MOVSD $(-1.0), NEG1 + SHUFPD $0, NEG1, NEG1 // { -1, -1 } + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dot_tail // if n <= 4 { goto dot_tail } + MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128) + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128) + +dot_loop: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_XPTR_INCX__X5 + MOVDDUP_XPTR_INCX_2__X7 + MOVDDUP_XPTR_INCx3X__X9 + + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_8_XPTR_INCX__X4 + MOVDDUP_8_XPTR_INCX_2__X6 + MOVDDUP_8_XPTR_INCx3X__X8 + + // X_i = { -imag(x[i]), -imag(x[i]) } + MULPD NEG1, X2 + MULPD P_NEG1, X4 + MULPD NEG1, X6 + MULPD P_NEG1, X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR), X10 + MOVUPS (Y_PTR)(INC_Y*1), X11 + MOVUPS (Y_PTR)(INC_Y*2), X12 + MOVUPS (Y_PTR)(INCx3_Y*1), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // sum += result[i] + ADDQ INC_X, X_PTR // X_PTR += incX + ADDQ INC_Y, Y_PTR // Y_PTR += incY + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVUPS SUM, sum+88(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s new file mode 100644 index 00000000..1db7e156 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s @@ -0,0 +1,143 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3 +#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5 +#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7 +#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9 + +#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2 +#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4 +#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6 +#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcUnitary(x, y []complex128) (sum complex128) +TEXT ·DotcUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + PXOR SUM, SUM // sum = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + XORPS P_SUM, P_SUM // psum = 0 + MOVSD $(-1.0), NEG1 + SHUFPD $0, NEG1, NEG1 // { -1, -1 } + XORQ IDX, IDX // i := 0 + MOVQ $1, I_IDX // j := 1 + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = floor( TAIL / 4 ) + SHRQ $2, LEN // LEN = TAIL % 4 + JZ dot_tail // if LEN == 0 { goto dot_tail } + + MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining + +dot_loop: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_16_XPTR_IDX_8__X5 + MOVDDUP_32_XPTR_IDX_8__X7 + MOVDDUP_48_XPTR_IDX_8__X9 + + MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_16_XPTR_IIDX_8__X4 + MOVDDUP_32_XPTR_IIDX_8__X6 + MOVDDUP_48_XPTR_IIDX_8__X8 + + // X_i = { -imag(x[i]), -imag(x[i]) } + MULPD NEG1, X2 + MULPD P_NEG1, X4 + MULPD NEG1, X6 + MULPD P_NEG1, X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + ADDQ $8, I_IDX // I_IDX += 8 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , 
real(x[i]) } + MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) } + MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // SUM += result[i] + ADDQ $2, IDX // IDX += 2 + ADDQ $2, I_IDX // I_IDX += 2 + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVUPS SUM, sum+48(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s new file mode 100644 index 00000000..386467fc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s @@ -0,0 +1,141 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3 +#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5 +#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7 +#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9 + +#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2 +#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4 +#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6 +#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 + +// func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) +TEXT ·DotuInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // sum = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128) + SHLQ $4, INC_X + MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128) + SHLQ $4, INC_Y + LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX + SHLQ $4, INC_X // INC_X *= sizeof(complex128) + MOVQ incY+64(FP), INC_Y // INC_Y = incY + SHLQ $4, INC_Y // INC_Y *= sizeof(complex128) + MOVQ LEN, TAIL + ANDQ $3, TAIL // LEN = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN <= 4 { goto dot_tail } + PXOR P_SUM, P_SUM // psum = 0 + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128) + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128) + +dot_loop: // do { + MOVDDUP_XPTR__X3 // 
X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_XPTR_INCX__X5 + MOVDDUP_XPTR_INCX_2__X7 + MOVDDUP_XPTR_INCx3X__X9 + + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_8_XPTR_INCX__X4 + MOVDDUP_8_XPTR_INCX_2__X6 + MOVDDUP_8_XPTR_INCx3X__X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR), X10 + MOVUPS (Y_PTR)(INC_Y*1), X11 + MOVUPS (Y_PTR)(INC_Y*2), X12 + MOVUPS (Y_PTR)(INCx3_Y*1), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + + DECQ LEN + JNZ dot_loop // } while --BX > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // sum += result[i] + ADDQ INC_X, X_PTR // X_PTR += incX + ADDQ INC_Y, Y_PTR // Y_PTR += incY + DECQ TAIL // --TAIL + JNZ dot_tail // } while TAIL > 0 + +dot_end: + MOVUPS SUM, sum+88(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s new file mode 100644 index 00000000..d0d507cd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s @@ -0,0 +1,130 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3 +#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5 +#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7 +#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9 + +#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2 +#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4 +#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6 +#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX + +// func DotuUnitary(x, y []complex128) (sum complex128) +TEXT ·DotuUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + PXOR P_SUM, P_SUM // P_SUM = 0 + XORQ IDX, IDX // IDX = 0 + MOVQ $1, DX // j = 1 + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = floor( LEN / 4 ) + SHRQ $2, LEN // LEN = LEN % 4 + JZ dot_tail // if LEN == 0 { goto dot_tail } + +dot_loop: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_16_XPTR_IDX_8__X5 + MOVDDUP_32_XPTR_IDX_8__X7 + MOVDDUP_48_XPTR_IDX_8__X9 + + MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_16_XPTR_IIDX_8__X4 + MOVDDUP_32_XPTR_IIDX_8__X6 + MOVDDUP_48_XPTR_IIDX_8__X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + ADDQ $8, I_IDX // I_IDX += 8 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + ADDPD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i] , real(x[i]) } + MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD 
X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // psum += result[i] + ADDQ $2, IDX // IDX += 2 + ADDQ $2, I_IDX // I_IDX += 2 + DECQ TAIL // --TAIL + JNZ dot_tail // } while TAIL > 0 + +dot_end: + MOVUPS SUM, sum+48(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s new file mode 100644 index 00000000..40d5851a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s @@ -0,0 +1,69 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define TAIL BX +#define INC R9 +#define INC3 R10 +#define ALPHA X0 +#define ALPHA_2 X1 + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 + +// func DscalInc(alpha float64, x []complex128, n, inc uintptr) +TEXT ·DscalInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SRC // SRC = &x + MOVQ n+32(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE dscal_end + + MOVDDUP_ALPHA // ALPHA = alpha + MOVQ inc+40(FP), INC // INC = inc + SHLQ $4, INC // INC = INC * sizeof(complex128) + LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC + MOVUPS ALPHA, ALPHA_2 // Copy ALPHA and ALPHA_2 for pipelining + MOVQ LEN, TAIL // TAIL = LEN + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dscal_tail // if LEN == 0 { goto dscal_tail } + +dscal_loop: // do { + MOVUPS (SRC), X2 // X_i = x[i] + MOVUPS (SRC)(INC*1), X3 + MOVUPS (SRC)(INC*2), X4 + MOVUPS (SRC)(INC3*1), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST) // x[i] = X_i + MOVUPS X3, (DST)(INC*1) + MOVUPS X4, (DST)(INC*2) + MOVUPS X5, (DST)(INC3*1) + + LEAQ (SRC)(INC*4), SRC // SRC += INC*4 + DECQ LEN + JNZ dscal_loop // } while --LEN > 0 + +dscal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JE dscal_end // if TAIL == 0 { return } + +dscal_tail_loop: // do { + MOVUPS (SRC), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST) // x[i] = X_i + ADDQ INC, SRC // SRC += INC + DECQ TAIL + JNZ dscal_tail_loop // } while --TAIL > 0 + +dscal_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s new file mode 100644 index 00000000..cbc0768a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s @@ -0,0 +1,66 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define IDX AX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 + +// func DscalUnitary(alpha float64, x []complex128) +TEXT ·DscalUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SRC // SRC = &x + MOVQ x_len+16(FP), LEN // LEN = len(x) + CMPQ LEN, $0 // if LEN == 0 { return } + JE dscal_end + + MOVDDUP_ALPHA // ALPHA = alpha + XORQ IDX, IDX // IDX = 0 + MOVUPS ALPHA, ALPHA_2 // Copy ALPHA to ALPHA_2 for pipelining + MOVQ LEN, TAIL // TAIL = LEN + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dscal_tail // if LEN == 0 { goto dscal_tail } + +dscal_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(SRC)(IDX*8), X3 + MOVUPS 32(SRC)(IDX*8), X4 + MOVUPS 48(SRC)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST)(IDX*8) // x[i] = X_i + MOVUPS X3, 16(DST)(IDX*8) + MOVUPS X4, 32(DST)(IDX*8) + MOVUPS X5, 48(DST)(IDX*8) + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dscal_loop // } while --LEN > 0 + +dscal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ dscal_end // if TAIL == 0 { return } + +dscal_tail_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST)(IDX*8) // x[i] = X_i + ADDQ $2, IDX // IDX += 2 + DECQ TAIL + JNZ dscal_tail_loop // } while --TAIL > 0 + +dscal_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go new file mode 100644 index 00000000..47a80e50 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go @@ -0,0 +1,31 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c128 + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []complex128, alpha complex128, x []complex128) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []complex128, incDst uintptr, alpha complex128, x []complex128, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s new file mode 100644 index 00000000..7b807b3a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s @@ -0,0 +1,116 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define IDX AX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_C X1 +#define ALPHA2 X10 +#define ALPHA_C2 X11 + +#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 +#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 +#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 +#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +// func ScalUnitary(alpha complex128, x []complex128) +TEXT ·ScalUnitary(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SRC // SRC = &x + MOVQ x_len+24(FP), LEN // LEN = len(x) + CMPQ LEN, $0 // if LEN == 0 { return } + JE scal_end + + MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } + MOVAPS ALPHA, ALPHA_C + SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } + + XORQ IDX, IDX // IDX = 0 + MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining + MOVAPS ALPHA_C, ALPHA_C2 + MOVQ LEN, TAIL + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ scal_tail // if BX == 0 { goto scal_tail } + +scal_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SRC)(IDX*8), X4 + MOVUPS 32(SRC)(IDX*8), X6 + MOVUPS 48(SRC)(IDX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + MULPD ALPHA_C, X2 + MULPD ALPHA, X3 + MULPD ALPHA_C2, X4 + MULPD ALPHA2, X5 + MULPD ALPHA_C, X6 + MULPD ALPHA, X7 + MULPD ALPHA_C2, X8 + MULPD ALPHA2, X9 + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) + MOVUPS X5, 16(DST)(IDX*8) + MOVUPS X7, 32(DST)(IDX*8) + MOVUPS X9, 48(DST)(IDX*8) + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ scal_loop // } while --LEN > 0 + +scal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ scal_end // if TAIL == 0 { return } + +scal_tail_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) + ADDQ $2, IDX // IDX += 2 + DECQ TAIL + JNZ scal_tail_loop // } while --LEN > 0 + +scal_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s new file mode 100644 index 00000000..7857c155 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s @@ -0,0 +1,121 @@ +// 
Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define TAIL BX +#define INC R9 +#define INC3 R10 +#define ALPHA X0 +#define ALPHA_C X1 +#define ALPHA2 X10 +#define ALPHA_C2 X11 + +#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 +#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 +#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 +#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +// func ScalInc(alpha complex128, x []complex128, n, inc uintptr) +TEXT ·ScalInc(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SRC // SRC = &x + MOVQ n+40(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE scal_end // if LEN == 0 { return } + + MOVQ inc+48(FP), INC // INC = inc + SHLQ $4, INC // INC = INC * sizeof(complex128) + LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC + + MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } + MOVAPS ALPHA, ALPHA_C + SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } + + MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining + MOVAPS ALPHA_C, ALPHA_C2 + MOVQ LEN, TAIL + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ scal_tail // if BX == 0 { goto scal_tail } + +scal_loop: // do { + MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SRC)(INC*1), X4 + MOVUPS (SRC)(INC*2), X6 + MOVUPS (SRC)(INC3*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + MULPD ALPHA_C, X2 + MULPD ALPHA, X3 + MULPD ALPHA_C2, X4 + MULPD ALPHA2, X5 + MULPD ALPHA_C, X6 + MULPD ALPHA, X7 + MULPD ALPHA_C2, X8 + MULPD ALPHA2, X9 + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + MOVUPS X3, (DST) // x[i] = X_(i+1) + MOVUPS X5, (DST)(INC*1) + MOVUPS X7, (DST)(INC*2) + MOVUPS X9, (DST)(INC3*1) + + LEAQ (SRC)(INC*4), SRC // SRC = &(SRC[inc*4]) + DECQ LEN + JNZ scal_loop // } while --BX > 0 + +scal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JE scal_end // if TAIL == 0 { return } + +scal_tail_loop: // do { + MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + MOVUPS X3, (DST) // x[i] = X_i + ADDQ INC, SRC // SRC = &(SRC[incX]) + DECQ TAIL + JNZ scal_tail_loop // } while --TAIL > 0 + +scal_end: + RET 
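
Editor's note (explanatory only, not part of the vendored files): the complex128 kernels in this hunk (axpy, dot, scal) all use the same SSE3 recipe that the comment blocks above describe — broadcast the real and imaginary parts of one operand (MOVDDUP), multiply by the other operand in its natural and lane-swapped order (SHUFPD $0x1), then combine with ADDSUBPD, which subtracts in the low lane and adds in the high lane. The minimal pure-Go sketch below shows the scalar arithmetic those lanes compute; names are illustrative and the snippet is not assumed to exist anywhere in the patch.

// mul reproduces, in scalar Go, the lane layout used by the SSE kernels:
// the "real duplicate" pair is multiplied by { imag(v), real(v) }, the
// "imag duplicate" pair by the shuffled { real(v), imag(v) }, and the two
// results are combined with a subtract in the low lane and an add in the
// high lane (the ADDSUBPD step) — ordinary complex multiplication.
package main

import "fmt"

func mul(a, v complex128) complex128 {
	re := real(a)*real(v) - imag(a)*imag(v) // low lane of ADDSUBPD
	im := imag(a)*real(v) + real(a)*imag(v) // high lane of ADDSUBPD
	return complex(re, im)
}

func main() {
	a, v := complex(2, 3), complex(5, -1)
	fmt.Println(mul(a, v) == a*v) // true: matches Go's built-in complex multiply
}

The conjugating variants (DotcUnitary, DotcInc) follow the same pattern after first negating the imaginary broadcast with a { -1, -1 } multiplier, which is why those files carry the extra NEG1/P_NEG1 registers.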
diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go new file mode 100644 index 00000000..ad6b23ca --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go @@ -0,0 +1,96 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package c128 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex128, x, y []complex128) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) + +// DscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func DscalUnitary(alpha float64, x []complex128) + +// DscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func DscalInc(alpha float64, x []complex128, n, inc uintptr) + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex128, x []complex128, n, inc uintptr) + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex128, x []complex128) + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum +func DotcUnitary(x, y []complex128) (sum complex128) + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex128) (sum complex128) + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go new file mode 100644 index 00000000..6313e571 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go @@ -0,0 +1,163 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package c128 + +import "math/cmplx" + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex128, x, y []complex128) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func DscalUnitary(alpha float64, x []complex128) { + for i, v := range x { + x[i] = complex(real(v)*alpha, imag(v)*alpha) + } +} + +// DscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func DscalInc(alpha float64, x []complex128, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) + ix += inc + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex128, x []complex128, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += inc + } +} + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex128, x []complex128) { + for i := range x { + x[i] *= alpha + } +} + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum +func DotcUnitary(x, y []complex128) (sum complex128) { + for i, v := range x { + sum += y[i] * cmplx.Conj(v) + } + return sum +} + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { + for i := 0; i < int(n); i++ { + sum += y[iy] * cmplx.Conj(x[ix]) + ix += incX + iy += incY + } + return sum +} + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex128) (sum complex128) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s new file mode 100644 index 00000000..841415db --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s @@ -0,0 +1,151 @@ +// Copyright ©2016 The Gonum Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ n+56(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+80(FP), R8 // R8 = ix + MOVQ iy+88(FP), R9 // R9 = iy + LEAQ (SI)(R8*8), SI // SI = &(x[ix]) + LEAQ (DI)(R9*8), DI // DI = &(y[iy]) + MOVQ DI, DX // DX = DI // Read/Write pointers + MOVQ incX+64(FP), R8 // R8 = incX + SHLQ $3, R8 // R8 *= sizeof(complex64) + MOVQ incY+72(FP), R9 // R9 = incY + SHLQ $3, R9 // R9 *= sizeof(complex64) + MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + MOVAPS X0, X1 + SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVSD (SI), X3 // X_i = { imag(x[i+1]), real(x[i+1]) } + MOVSD (SI)(R8*1), X5 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSD (SI), X7 + MOVSD (SI)(R8*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DX), X2 + MOVSD (DX)(R9*1), X4 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + MOVSD (DX), X6 + MOVSD (DX)(R9*1), X8 + ADDPS X2, X3 + ADDPS X4, X5 + ADDPS X6, X7 + ADDPS X8, X9 + + MOVSD X3, (DI) // y[i] = X_i + MOVSD X5, (DI)(R9*1) + LEAQ (DI)(R9*2), DI // DI = &(DI[incDst]) + MOVSD X7, (DI) + MOVSD X9, (DI)(R9*1) + LEAQ (SI)(R8*2), 
SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incDst]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: // do { + MOVSD (SI), X3 // X_i = { imag(x[i+1]), real(x[i+1]) } + MOVSHDUP_X3_X2 // X_(i-1) = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 // X_i = { imag(x[i]), imag(x[i]) } + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 // (ai*x1r+ar*x1i, ar*x1r-ai*x1i) + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DI), X4 + ADDPS X4, X3 + MOVSD X3, (DI) // y[i] = X_i + ADDQ R8, SI // SI += incX + ADDQ R9, DI // DI += incY + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s new file mode 100644 index 00000000..5c5228dc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s @@ -0,0 +1,156 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+48(FP), SI // SI = &x + MOVQ y_base+72(FP), DX // DX = &y + MOVQ n+96(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+120(FP), R8 // Load the first index + MOVQ iy+128(FP), R9 + MOVQ idst+32(FP), R10 + LEAQ (SI)(R8*8), SI // SI = &(x[ix]) + LEAQ (DX)(R9*8), DX // DX = &(y[iy]) + LEAQ (DI)(R10*8), DI // DI = &(dst[idst]) + MOVQ incX+104(FP), R8 // Incrementors*8 for easy iteration (ADDQ) + SHLQ $3, R8 + MOVQ incY+112(FP), R9 + SHLQ $3, R9 + MOVQ incDst+24(FP), R10 + SHLQ $3, R10 + MOVSD alpha+40(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + MOVAPS X0, X1 + SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for 
pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSD (SI)(R8*1), X5 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSD (SI), X7 + MOVSD (SI)(R8*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DX), X2 + MOVSD (DX)(R9*1), X4 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + MOVSD (DX), X6 + MOVSD (DX)(R9*1), X8 + ADDPS X2, X3 + ADDPS X4, X5 + ADDPS X6, X7 + ADDPS X8, X9 + + MOVSD X3, (DI) // y[i] = X_i + MOVSD X5, (DI)(R10*1) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst]) + MOVSD X7, (DI) + MOVSD X9, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: + MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DX), X4 + ADDPS X4, X3 + MOVSD X3, (DI) // y[i] = X_i + ADDQ R8, SI // SI += incX + ADDQ R9, DX // DX += incY + ADDQ R10, DI // DI += incDst + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s new file mode 100644 index 00000000..ae744a49 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s @@ -0,0 +1,160 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitary(alpha complex64, x, y []complex64) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ x_len+16(FP), CX // CX = min( len(x), len(y) ) + CMPQ y_len+40(FP), CX + CMOVQLE y_len+40(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + PXOR X0, X0 // Clear work registers and cache-align loop + PXOR X1, X1 + MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) } + MOVAPS X0, X1 + SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVQ DI, BX // Align on 16-byte boundary for ADDPS + ANDQ $15, BX // BX = &y & 15 + JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim } + + // Trim first value in unaligned buffer + XORPS X2, X2 // Clear work registers and cache-align loop + XORPS X3, X3 + XORPS X4, X4 + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + MOVSD (DI)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // y[i] = X3 + INCQ AX // i++ + DECQ CX // --CX + JZ caxy_end // if CX == 0 { return } + +caxy_no_trim: + MOVAPS X0, X10 // Copy X0 and X1 for pipelineing + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $7, CX // CX = n % 8 + SHRQ $3, BX // BX = floor( n / 8 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: // do { + // X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) } + MOVUPS (SI)(AX*8), X3 + MOVUPS 16(SI)(AX*8), X5 + MOVUPS 32(SI)(AX*8), X7 + MOVUPS 48(SI)(AX*8), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]), + // imag(a) * real(x[i+1]), real(a) * real(x[i+1]) } + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]), + // real(a) * 
imag(x[i+1]), imag(a) * imag(x[i+1]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]), + // real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]), + // imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) } + ADDPS (DI)(AX*8), X3 + ADDPS 16(DI)(AX*8), X5 + ADDPS 32(DI)(AX*8), X7 + ADDPS 48(DI)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX // --BX + JNZ caxy_loop // } while BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // do { + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + MOVSD (DI)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // y[i] = X3 + INCQ AX // ++i + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s new file mode 100644 index 00000000..a5d70209 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s @@ -0,0 +1,157 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+32(FP), SI // SI = &x + MOVQ y_base+56(FP), DX // DX = &y + MOVQ x_len+40(FP), CX + CMPQ y_len+64(FP), CX // CX = min( len(x), len(y), len(dst) ) + CMOVQLE y_len+64(FP), CX + CMPQ dst_len+8(FP), CX + CMOVQLE dst_len+8(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + MOVSD alpha+24(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) } + MOVAPS X0, X1 + SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVQ DX, BX // Align on 16-byte boundary for ADDPS + ANDQ $15, BX // BX = &y & 15 + JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim } + + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + MOVSD (DX)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // dst[i] = X3 + INCQ AX // i++ + DECQ CX // --CX + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_no_trim: + MOVAPS X0, X10 // Copy X0 and X1 for pipelineing + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $7, CX // CX = n % 8 + SHRQ $3, BX // BX = floor( n / 8 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: + // X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) } + MOVUPS (SI)(AX*8), X3 + MOVUPS 16(SI)(AX*8), X5 + MOVUPS 32(SI)(AX*8), X7 + MOVUPS 48(SI)(AX*8), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]), + // imag(a) * real(x[i+1]), real(a) * real(x[i+1]) } + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]), + // real(a) * imag(x[i+1]), imag(a) * imag(x[i+1]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + 
MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]), + // real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]), + // imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) } + ADDPS (DX)(AX*8), X3 + ADDPS 16(DX)(AX*8), X5 + ADDPS 32(DX)(AX*8), X7 + ADDPS 48(DX)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX // --BX + JNZ caxy_loop // } while BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // do { + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + MOVSD (DX)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // y[i] = X3 + INCQ AX // ++i + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go new file mode 100644 index 00000000..910e1e5c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go @@ -0,0 +1,7 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c64 + +func conj(c complex64) complex64 { return complex(real(c), -imag(c)) } diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go new file mode 100644 index 00000000..35f1b2a2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package c64 provides complex64 vector primitives. +package c64 // import "gonum.org/v1/gonum/internal/asm/c64" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s new file mode 100644 index 00000000..87de31d3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s @@ -0,0 +1,160 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4 +#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6 +#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8 + +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 +#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5 +#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7 +#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) +TEXT ·DotcInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ n+48(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotc_end + MOVQ ix+72(FP), INC_X + MOVQ iy+80(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64) + SHLQ $3, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64) + SHLQ $3, INC_Y + MOVSS $(-1.0), NEG1 + SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dotc_tail // if LEN == 0 { goto dotc_tail } + + MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dotc_loop: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSD (X_PTR)(INC_X*1), X5 + MOVSD (X_PTR)(INC_X*2), X7 + MOVSD (X_PTR)(INCx3_X*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_(i-1) = { -imag(x[i]), -imag(x[i]) } + MULPS NEG1, X2 + MULPS P_NEG1, X4 + MULPS NEG1, X6 + MULPS P_NEG1, X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVSD (Y_PTR), X10 + MOVSD (Y_PTR)(INC_Y*1), X11 + MOVSD (Y_PTR)(INC_Y*2), X12 + MOVSD (Y_PTR)(INCx3_Y*1), X13 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPS $0xB1, X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // 
Y_PTR = &(Y_PTR[INC_Y*4]) + + DECQ LEN + JNZ dotc_loop // } while --LEN > 0 + + ADDPS P_SUM, SUM // SUM = { P_SUM + SUM } + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotc_end + +dotc_tail: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + ADDPS X3, SUM // SUM += X_i + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dotc_tail // } while --TAIL > 0 + +dotc_end: + MOVSD SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s new file mode 100644 index 00000000..d53479ca --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s @@ -0,0 +1,208 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3 +#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5 +#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7 +#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9 + +#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2 +#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4 +#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6 +#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8 + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcUnitary(x, y []complex64) (sum complex64) +TEXT ·DotcUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotc_end + XORQ IDX, IDX // i = 0 + MOVSS $(-1.0), NEG1 + SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 } + + MOVQ X_PTR, DX + ANDQ $15, DX // DX = &x & 15 + JZ dotc_aligned // if DX == 0 { goto dotc_aligned } + + MOVSD 
(X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + MOVAPS X3, SUM // SUM = X_i + INCQ IDX // IDX++ + DECQ LEN // LEN-- + JZ dotc_ret // if LEN == 0 { goto dotc_ret } + +dotc_aligned: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ dotc_tail // if LEN == 0 { return } + MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining + +dotc_loop: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_16_XPTR_IDX_8__X5 + MOVSLDUP_32_XPTR_IDX_8__X7 + MOVSLDUP_48_XPTR_IDX_8__X9 + + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) } + MOVSHDUP_16_XPTR_IDX_8__X4 + MOVSHDUP_32_XPTR_IDX_8__X6 + MOVSHDUP_48_XPTR_IDX_8__X8 + + // X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_(i-1) = { -imag(x[i]), -imag(x[i]), -imag(x[i]+1), -imag(x[i]+1) } + MULPS NEG1, X2 + MULPS P_NEG1, X4 + MULPS NEG1, X6 + MULPS P_NEG1, X8 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]), + // imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) } + SHUFPS $0xB1, X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]), + // real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]), + // imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]), + // real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dotc_loop // } while --LEN > 0 + + ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotc_end + +dotc_tail: + MOVQ TAIL, LEN + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ dotc_tail_one // if LEN == 0 { goto dotc_tail_one } + +dotc_tail_two: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS 
$0xB1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + + ADDQ $2, IDX // IDX += 2 + DECQ LEN + JNZ dotc_tail_two // } while --LEN > 0 + + ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + ANDQ $1, TAIL + JZ dotc_end + +dotc_tail_one: + MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + +dotc_end: + ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] } + MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] } + ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] } + +dotc_ret: + MOVSD SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s new file mode 100644 index 00000000..bdee59be --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s @@ -0,0 +1,148 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
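
For reference, a minimal pure-Go sketch of what the DotcUnitary kernel above computes, following the documented semantics sum += y[i] * conj(x[i]); the helper name is illustrative only and this is not part of the vendored gonum sources:

func dotcRef(x, y []complex64) complex64 {
	n := len(x)
	if len(y) < n {
		n = len(y)
	}
	var sum complex64
	for i := 0; i < n; i++ {
		xr, xi := real(x[i]), imag(x[i])
		yr, yi := real(y[i]), imag(y[i])
		// y[i] * conj(x[i]) expands to:
		//   real: yr*xr + yi*xi
		//   imag: yi*xr - yr*xi
		// which is what the MOVSLDUP/MOVSHDUP/ADDSUBPS sequence assembles per lane.
		sum += complex(yr*xr+yi*xi, yi*xr-yr*xi)
	}
	return sum
}
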
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4 +#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6 +#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8 + +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 +#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5 +#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7 +#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 + +// func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) +TEXT ·DotuInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ n+48(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotu_end + MOVQ ix+72(FP), INC_X + MOVQ iy+80(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64) + SHLQ $3, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64) + SHLQ $3, INC_Y + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dotu_tail // if TAIL == 0 { goto dotu_tail } + + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dotu_loop: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSD (X_PTR)(INC_X*1), X5 + MOVSD (X_PTR)(INC_X*2), X7 + MOVSD (X_PTR)(INCx3_X*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_j = { imag(y[i]), real(y[i]) } + MOVSD (Y_PTR), X10 + MOVSD (Y_PTR)(INC_Y*1), X11 + MOVSD (Y_PTR)(INC_Y*2), X12 + MOVSD (Y_PTR)(INCx3_Y*1), X13 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPS $0xB1, X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y*4]) + + DECQ LEN + JNZ dotu_loop // } while --LEN > 0 + + ADDPS P_SUM, SUM // SUM = { P_SUM + SUM } + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotu_end + +dotu_tail: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } + 
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + ADDPS X3, SUM // SUM += X_i + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dotu_tail // } while --TAIL > 0 + +dotu_end: + MOVSD SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s new file mode 100644 index 00000000..dce83a46 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s @@ -0,0 +1,197 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3 +#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5 +#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7 +#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9 + +#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2 +#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4 +#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6 +#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8 + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotuUnitary(x, y []complex64) (sum complex64) +TEXT ·DotuUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotu_end + XORQ IDX, IDX // IDX = 0 + + MOVQ X_PTR, DX + ANDQ $15, DX // DX = &x & 15 + JZ dotu_aligned // if DX == 0 { goto dotu_aligned } + + MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) 
= { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + MOVAPS X3, SUM // SUM = X_i + INCQ IDX // IDX++ + DECQ LEN // LEN-- + JZ dotu_end // if LEN == 0 { goto dotu_end } + +dotu_aligned: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ dotu_tail // if LEN == 0 { goto dotu_tail } + PXOR P_SUM, P_SUM + +dotu_loop: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_16_XPTR_IDX_8__X5 + MOVSLDUP_32_XPTR_IDX_8__X7 + MOVSLDUP_48_XPTR_IDX_8__X9 + + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVSHDUP_16_XPTR_IDX_8__X4 + MOVSHDUP_32_XPTR_IDX_8__X6 + MOVSHDUP_48_XPTR_IDX_8__X8 + + // X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]), + // imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) } + SHUFPS $0xB1, X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]), + // real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]), + // imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]), + // real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dotu_loop // } while --LEN > 0 + + ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotu_end + +dotu_tail: + MOVQ TAIL, LEN + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ dotu_tail_one // if LEN == 0 { goto dotc_tail_one } + +dotu_tail_two: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0xB1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + + ADDQ $2, IDX // IDX += 2 + DECQ LEN + JNZ dotu_tail_two // } while --LEN > 0 + + ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + ANDQ $1, TAIL + JZ dotu_end + +dotu_tail_one: + MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), 
imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + +dotu_end: + ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] } + MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] } + ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] } + +dotu_ret: + MOVSD SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go new file mode 100644 index 00000000..a84def87 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go @@ -0,0 +1,79 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c64 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex64, x []complex64) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []complex64, alpha complex64, x []complex64) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex64, x []complex64, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []complex64, incDst uintptr, alpha complex64, x []complex64, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} + +// SscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func SscalUnitary(alpha float32, x []complex64) { + for i, v := range x { + x[i] = complex(real(v)*alpha, imag(v)*alpha) + } +} + +// SscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func SscalInc(alpha float32, x []complex64, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) + ix += inc + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go new file mode 100644 index 00000000..3e12d6bc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go @@ -0,0 +1,68 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +package c64 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex64, x, y []complex64) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * conj(v) +// } +// return sum +func DotcUnitary(x, y []complex64) (sum complex64) + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex64) (sum complex64) + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go new file mode 100644 index 00000000..411afcb2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package c64 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex64, x, y []complex64) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * conj(v) +// } +// return sum +func DotcUnitary(x, y []complex64) (sum complex64) { + for i, v := range x { + sum += y[i] * conj(v) + } + return sum +} + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * conj(x[ix]) + ix += incX + iy += incY + } + return sum +} + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex64) (sum complex64) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s new file mode 100644 index 00000000..2d167c08 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s @@ -0,0 +1,73 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
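
A note on the pure-Go complex64 fallbacks above (stubs_noasm.go): conj there is not from math/cmplx, which only covers complex128; it is presumably the package-local helper, equivalent to:

func conj(c complex64) complex64 { return complex(real(c), -imag(c)) }
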
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ n+56(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JLE axpyi_end + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ ix+80(FP), R8 // R8 = ix + MOVQ iy+88(FP), R9 // R9 = iy + LEAQ (SI)(R8*4), SI // SI = &(x[ix]) + LEAQ (DI)(R9*4), DI // DI = &(y[iy]) + MOVQ DI, DX // DX = DI Read Pointer for y + MOVQ incX+64(FP), R8 // R8 = incX + SHLQ $2, R8 // R8 *= sizeof(float32) + MOVQ incY+72(FP), R9 // R9 = incY + SHLQ $2, R9 // R9 *= sizeof(float32) + MOVSS alpha+0(FP), X0 // X0 = alpha + MOVSS X0, X1 // X1 = X0 // for pipelining + MOVQ CX, BX + ANDQ $3, BX // BX = n % 4 + SHRQ $2, CX // CX = floor( n / 4 ) + JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } + +axpyi_loop: // Loop unrolled 4x do { + MOVSS (SI), X2 // X_i = x[i] + MOVSS (SI)(R8*1), X3 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSS (SI), X4 + MOVSS (SI)(R8*1), X5 + MULSS X1, X2 // X_i *= a + MULSS X0, X3 + MULSS X1, X4 + MULSS X0, X5 + ADDSS (DX), X2 // X_i += y[i] + ADDSS (DX)(R9*1), X3 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDSS (DX), X4 + ADDSS (DX)(R9*1), X5 + MOVSS X2, (DI) // y[i] = X_i + MOVSS X3, (DI)(R9*1) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + MOVSS X4, (DI) + MOVSS X5, (DI)(R9*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + LOOP axpyi_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpyi_end + +axpyi_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +axpyi_tail: // do { + MOVSS (SI), X2 // X2 = x[i] + MULSS X1, X2 // X2 *= a + ADDSS (DI), X2 // X2 += y[i] + MOVSS X2, (DI) // y[i] = X2 + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s new file mode 100644 index 00000000..b79f9926 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s @@ -0,0 +1,78 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
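
The AxpyInc kernel above implements the strided update documented on the stubs later in this patch. A minimal pure-Go equivalent, for reference (the function name is illustrative only):

func axpyIncRef(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) {
	for i := 0; i < int(n); i++ {
		y[iy] += alpha * x[ix] // y[iy] accumulates alpha times the strided x element
		ix += incX
		iy += incY
	}
}
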
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ n+96(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JLE axpyi_end + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+48(FP), SI // SI = &x + MOVQ y_base+72(FP), DX // DX = &y + MOVQ ix+120(FP), R8 // R8 = ix // Load the first index + MOVQ iy+128(FP), R9 // R9 = iy + MOVQ idst+32(FP), R10 // R10 = idst + LEAQ (SI)(R8*4), SI // SI = &(x[ix]) + LEAQ (DX)(R9*4), DX // DX = &(y[iy]) + LEAQ (DI)(R10*4), DI // DI = &(dst[idst]) + MOVQ incX+104(FP), R8 // R8 = incX + SHLQ $2, R8 // R8 *= sizeof(float32) + MOVQ incY+112(FP), R9 // R9 = incY + SHLQ $2, R9 // R9 *= sizeof(float32) + MOVQ incDst+24(FP), R10 // R10 = incDst + SHLQ $2, R10 // R10 *= sizeof(float32) + MOVSS alpha+40(FP), X0 // X0 = alpha + MOVSS X0, X1 // X1 = X0 // for pipelining + MOVQ CX, BX + ANDQ $3, BX // BX = n % 4 + SHRQ $2, CX // CX = floor( n / 4 ) + JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } + +axpyi_loop: // Loop unrolled 4x do { + MOVSS (SI), X2 // X_i = x[i] + MOVSS (SI)(R8*1), X3 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSS (SI), X4 + MOVSS (SI)(R8*1), X5 + MULSS X1, X2 // X_i *= a + MULSS X0, X3 + MULSS X1, X4 + MULSS X0, X5 + ADDSS (DX), X2 // X_i += y[i] + ADDSS (DX)(R9*1), X3 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDSS (DX), X4 + ADDSS (DX)(R9*1), X5 + MOVSS X2, (DI) // dst[i] = X_i + MOVSS X3, (DI)(R10*1) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + MOVSS X4, (DI) + MOVSS X5, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + LOOP axpyi_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpyi_end + +axpyi_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +axpyi_tail: // do { + MOVSS (SI), X2 // X2 = x[i] + MULSS X1, X2 // X2 *= a + ADDSS (DX), X2 // X2 += y[i] + MOVSS X2, (DI) // dst[i] = X2 + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DX // DX = &(DX[incY]) + ADDQ R10, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s new file mode 100644 index 00000000..97df90a0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s @@ -0,0 +1,97 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyUnitary(alpha float32, x, y []float32) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ x_len+16(FP), BX // BX = min( len(x), len(y) ) + CMPQ y_len+40(FP), BX + CMOVQLE y_len+40(FP), BX + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + MOVSS alpha+0(FP), X0 + SHUFPS $0, X0, X0 // X0 = { a, a, a, a } + XORQ AX, AX // i = 0 + PXOR X2, X2 // 2 NOP instructions (PXOR) to align + PXOR X3, X3 // loop to cache line + MOVQ DI, CX + ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS + JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } + + XORQ $0xF, CX // CX = 4 - floor( BX % 16 / 4 ) + INCQ CX + SHRQ $2, CX + +axpy_align: // Trim first value(s) in unaligned buffer do { + MOVSS (SI)(AX*4), X2 // X2 = x[i] + MULSS X0, X2 // X2 *= a + ADDSS (DI)(AX*4), X2 // X2 += y[i] + MOVSS X2, (DI)(AX*4) // y[i] = X2 + INCQ AX // i++ + DECQ BX + JZ axpy_end // if --BX == 0 { return } + LOOP axpy_align // } while --CX > 0 + +axpy_no_trim: + MOVUPS X0, X1 // Copy X0 to X1 for pipelining + MOVQ BX, CX + ANDQ $0xF, BX // BX = len % 16 + SHRQ $4, CX // CX = int( len / 16 ) + JZ axpy_tail4_start // if CX == 0 { return } + +axpy_loop: // Loop unrolled 16x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] + MOVUPS 16(SI)(AX*4), X3 + MOVUPS 32(SI)(AX*4), X4 + MOVUPS 48(SI)(AX*4), X5 + MULPS X0, X2 // X2 *= a + MULPS X1, X3 + MULPS X0, X4 + MULPS X1, X5 + ADDPS (DI)(AX*4), X2 // X2 += y[i:i+4] + ADDPS 16(DI)(AX*4), X3 + ADDPS 32(DI)(AX*4), X4 + ADDPS 48(DI)(AX*4), X5 + MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 + MOVUPS X3, 16(DI)(AX*4) + MOVUPS X4, 32(DI)(AX*4) + MOVUPS X5, 48(DI)(AX*4) + ADDQ $16, AX // i += 16 + LOOP axpy_loop // while (--CX) > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + +axpy_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ BX, CX // CX = floor( BX / 4 ) + SHRQ $2, CX + JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start } + +axpy_tail4: // Loop unrolled 4x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i] + MULPS X0, X2 // X2 *= a + ADDPS (DI)(AX*4), X2 // X2 += y[i] + MOVUPS X2, (DI)(AX*4) // y[i] = X2 + ADDQ $4, AX // i += 4 + LOOP axpy_tail4 // } while --CX > 0 + +axpy_tail_start: // Reset loop counter for 1-wide tail loop + MOVQ BX, CX // CX = BX % 4 + ANDQ $3, CX + JZ axpy_end // if CX == 0 { return } + +axpy_tail: + MOVSS (SI)(AX*4), X1 // X1 = x[i] + MULSS X0, X1 // X1 *= a + ADDSS (DI)(AX*4), X1 // X1 += y[i] + MOVSS X1, (DI)(AX*4) // y[i] = X1 + INCQ AX // i++ + LOOP axpy_tail // } while --CX > 0 + +axpy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s new file mode 100644 index 00000000..a826ca31 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s @@ -0,0 +1,98 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
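
The AxpyUnitary kernel above processes elements one at a time until the destination pointer is 16-byte aligned, then switches to the 16-wide unrolled MOVUPS/ADDPS loop. A rough sketch of the trim count it derives from the byte address of y (illustrative only; it assumes the slice data is at least 4-byte aligned, as Go float32 slices are):

// Mirrors the XORQ $0xF / INCQ / SHRQ $2 sequence in the kernel above.
func trimCount(addr uintptr) uintptr {
	r := addr & 15 // offset within the current 16-byte line
	if r == 0 {
		return 0 // already aligned; the kernel skips the trim loop
	}
	return (16 - r) / 4 // float32 elements until the next 16-byte boundary
}
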
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+32(FP), SI // SI = &x + MOVQ y_base+56(FP), DX // DX = &y + MOVQ x_len+40(FP), BX // BX = min( len(x), len(y), len(dst) ) + CMPQ y_len+64(FP), BX + CMOVQLE y_len+64(FP), BX + CMPQ dst_len+8(FP), BX + CMOVQLE dst_len+8(FP), BX + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + MOVSS alpha+24(FP), X0 + SHUFPS $0, X0, X0 // X0 = { a, a, a, a, } + XORQ AX, AX // i = 0 + MOVQ DX, CX + ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS + JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } + + XORQ $0xF, CX // CX = 4 - floor ( B % 16 / 4 ) + INCQ CX + SHRQ $2, CX + +axpy_align: // Trim first value(s) in unaligned buffer do { + MOVSS (SI)(AX*4), X2 // X2 = x[i] + MULSS X0, X2 // X2 *= a + ADDSS (DX)(AX*4), X2 // X2 += y[i] + MOVSS X2, (DI)(AX*4) // y[i] = X2 + INCQ AX // i++ + DECQ BX + JZ axpy_end // if --BX == 0 { return } + LOOP axpy_align // } while --CX > 0 + +axpy_no_trim: + MOVUPS X0, X1 // Copy X0 to X1 for pipelining + MOVQ BX, CX + ANDQ $0xF, BX // BX = len % 16 + SHRQ $4, CX // CX = floor( len / 16 ) + JZ axpy_tail4_start // if CX == 0 { return } + +axpy_loop: // Loop unrolled 16x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] + MOVUPS 16(SI)(AX*4), X3 + MOVUPS 32(SI)(AX*4), X4 + MOVUPS 48(SI)(AX*4), X5 + MULPS X0, X2 // X2 *= a + MULPS X1, X3 + MULPS X0, X4 + MULPS X1, X5 + ADDPS (DX)(AX*4), X2 // X2 += y[i:i+4] + ADDPS 16(DX)(AX*4), X3 + ADDPS 32(DX)(AX*4), X4 + ADDPS 48(DX)(AX*4), X5 + MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 + MOVUPS X3, 16(DI)(AX*4) + MOVUPS X4, 32(DI)(AX*4) + MOVUPS X5, 48(DI)(AX*4) + ADDQ $16, AX // i += 16 + LOOP axpy_loop // while (--CX) > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + +axpy_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ BX, CX // CX = floor( BX / 4 ) + SHRQ $2, CX + JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start } + +axpy_tail4: // Loop unrolled 4x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i] + MULPS X0, X2 // X2 *= a + ADDPS (DX)(AX*4), X2 // X2 += y[i] + MOVUPS X2, (DI)(AX*4) // y[i] = X2 + ADDQ $4, AX // i += 4 + LOOP axpy_tail4 // } while --CX > 0 + +axpy_tail_start: // Reset loop counter for 1-wide tail loop + MOVQ BX, CX // CX = BX % 4 + ANDQ $3, CX + JZ axpy_end // if CX == 0 { return } + +axpy_tail: + MOVSS (SI)(AX*4), X1 // X1 = x[i] + MULSS X0, X1 // X1 *= a + ADDSS (DX)(AX*4), X1 // X1 += y[i] + MOVSS X1, (DI)(AX*4) // y[i] = X1 + INCQ AX // i++ + LOOP axpy_tail // } while --CX > 0 + +axpy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s new file mode 100644 index 00000000..4518e049 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s @@ -0,0 +1,91 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R10 +#define INC_Y R9 +#define INCx3_Y R11 +#define SUM X0 +#define P_SUM X1 + +// func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) +TEXT ·DdotInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 + JE dot_end + + MOVQ ix+72(FP), INC_X // INC_X = ix + MOVQ iy+80(FP), INC_Y // INC_Y = iy + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) + + MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(float32) + SHLQ $2, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(float32) + SHLQ $2, INC_Y + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN == 0 { goto dot_tail } + + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dot_loop: // Loop unrolled 4x do { + CVTSS2SD (X_PTR), X2 // X_i = x[i:i+1] + CVTSS2SD (X_PTR)(INC_X*1), X3 + CVTSS2SD (X_PTR)(INC_X*2), X4 + CVTSS2SD (X_PTR)(INCx3_X*1), X5 + + CVTSS2SD (Y_PTR), X6 // X_j = y[i:i+1] + CVTSS2SD (Y_PTR)(INC_Y*1), X7 + CVTSS2SD (Y_PTR)(INC_Y*2), X8 + CVTSS2SD (Y_PTR)(INCx3_Y*1), X9 + + MULSD X6, X2 // X_i *= X_j + MULSD X7, X3 + MULSD X8, X4 + MULSD X9, X5 + + ADDSD X2, SUM // SUM += X_i + ADDSD X3, P_SUM + ADDSD X4, SUM + ADDSD X5, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDSD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + CVTSS2SD (X_PTR), X2 // X2 = x[i] + CVTSS2SD (Y_PTR), X3 // X2 *= y[i] + MULSD X3, X2 + ADDSD X2, SUM // SUM += X2 + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVSD SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s new file mode 100644 index 00000000..231cbd3b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s @@ -0,0 +1,110 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define HADDPD_SUM_SUM LONG $0xC07C0F66 // @ HADDPD X0, X0 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define IDX AX +#define SUM X0 +#define P_SUM X1 + +// func DdotUnitary(x, y []float32) (sum float32) +TEXT ·DdotUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + PXOR SUM, SUM // psum = 0 + CMPQ LEN, $0 + JE dot_end + + XORQ IDX, IDX + MOVQ Y_PTR, DX + ANDQ $0xF, DX // Align on 16-byte boundary for ADDPS + JZ dot_no_trim // if DX == 0 { goto dot_no_trim } + + SUBQ $16, DX + +dot_align: // Trim first value(s) in unaligned buffer do { + CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) + CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) + MULSD X3, X2 + ADDSD X2, SUM // SUM += X2 + INCQ IDX // IDX++ + DECQ LEN + JZ dot_end // if --TAIL == 0 { return } + ADDQ $4, DX + JNZ dot_align // } while --LEN > 0 + +dot_no_trim: + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + MOVQ LEN, TAIL + ANDQ $0x7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } + +dot_loop: // Loop unrolled 8x do { + CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + CVTPS2PD 8(X_PTR)(IDX*4), X3 + CVTPS2PD 16(X_PTR)(IDX*4), X4 + CVTPS2PD 24(X_PTR)(IDX*4), X5 + + CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] + CVTPS2PD 8(Y_PTR)(IDX*4), X7 + CVTPS2PD 16(Y_PTR)(IDX*4), X8 + CVTPS2PD 24(Y_PTR)(IDX*4), X9 + + MULPD X6, X2 // X_i *= X_j + MULPD X7, X3 + MULPD X8, X4 + MULPD X9, X5 + + ADDPD X2, SUM // SUM += X_i + ADDPD X3, P_SUM + ADDPD X4, SUM + ADDPD X5, P_SUM + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDPD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail_start: + MOVQ TAIL, LEN + SHRQ $1, LEN + JZ dot_tail_one + +dot_tail_two: + CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] + MULPD X6, X2 // X_i *= X_j + ADDPD X2, SUM // SUM += X_i + ADDQ $2, IDX // IDX += 2 + DECQ LEN + JNZ dot_tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ dot_end + +dot_tail_one: + CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) + CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) + MULSD X3, X2 // X2 *= X3 + ADDSD X2, SUM // SUM += X2 + +dot_end: + HADDPD_SUM_SUM // SUM = \sum{ SUM[i] } + MOVSD SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go new file mode 100644 index 00000000..408847a6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f32 provides float32 vector primitives. +package f32 // import "gonum.org/v1/gonum/internal/asm/f32" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s new file mode 100644 index 00000000..4d36b289 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s @@ -0,0 +1,85 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
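
The DdotInc and DdotUnitary kernels above widen every float32 operand to float64 (CVTSS2SD/CVTPS2PD) before multiplying and accumulating, matching the stub documentation sum += float64(y[i]) * float64(x[i]). A minimal pure-Go equivalent of the unitary case, for reference:

func ddotRef(x, y []float32) float64 {
	n := len(x)
	if len(y) < n {
		n = len(y)
	}
	var sum float64
	for i := 0; i < n; i++ {
		sum += float64(x[i]) * float64(y[i]) // widen before multiplying to keep float64 precision
	}
	return sum
}
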
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R10 +#define INC_Y R9 +#define INCx3_Y R11 +#define SUM X0 +#define P_SUM X1 + +// func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) +TEXT ·DotInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + MOVQ n+48(FP), LEN // LEN = n + CMPQ LEN, $0 + JE dot_end + + MOVQ ix+72(FP), INC_X // INC_X = ix + MOVQ iy+80(FP), INC_Y // INC_Y = iy + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) + + MOVQ incX+56(FP), INC_X // INC_X := incX * sizeof(float32) + SHLQ $2, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y := incY * sizeof(float32) + SHLQ $2, INC_Y + + MOVQ LEN, TAIL + ANDQ $0x3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN == 0 { goto dot_tail } + + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dot_loop: // Loop unrolled 4x do { + MOVSS (X_PTR), X2 // X_i = x[i:i+1] + MOVSS (X_PTR)(INC_X*1), X3 + MOVSS (X_PTR)(INC_X*2), X4 + MOVSS (X_PTR)(INCx3_X*1), X5 + + MULSS (Y_PTR), X2 // X_i *= y[i:i+1] + MULSS (Y_PTR)(INC_Y*1), X3 + MULSS (Y_PTR)(INC_Y*2), X4 + MULSS (Y_PTR)(INCx3_Y*1), X5 + + ADDSS X2, SUM // SUM += X_i + ADDSS X3, P_SUM + ADDSS X4, SUM + ADDSS X5, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDSS P_SUM, SUM // P_SUM += SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVSS (X_PTR), X2 // X2 = x[i] + MULSS (Y_PTR), X2 // X2 *= y[i] + ADDSS X2, SUM // SUM += X2 + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVSS SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s new file mode 100644 index 00000000..c32ede5a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s @@ -0,0 +1,106 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define HADDPS_SUM_SUM LONG $0xC07C0FF2 // @ HADDPS X0, X0 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define IDX AX +#define SUM X0 +#define P_SUM X1 + +// func DotUnitary(x, y []float32) (sum float32) +TEXT ·DotUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 + JE dot_end + + XORQ IDX, IDX + MOVQ Y_PTR, DX + ANDQ $0xF, DX // Align on 16-byte boundary for MULPS + JZ dot_no_trim // if DX == 0 { goto dot_no_trim } + SUBQ $16, DX + +dot_align: // Trim first value(s) in unaligned buffer do { + MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] + MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] + ADDSS X2, SUM // SUM += X2 + INCQ IDX // IDX++ + DECQ LEN + JZ dot_end // if --TAIL == 0 { return } + ADDQ $4, DX + JNZ dot_align // } while --DX > 0 + +dot_no_trim: + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + MOVQ LEN, TAIL + ANDQ $0xF, TAIL // TAIL = LEN % 16 + SHRQ $4, LEN // LEN = floor( LEN / 16 ) + JZ dot_tail4_start // if LEN == 0 { goto dot_tail4_start } + +dot_loop: // Loop unrolled 16x do { + MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + MOVUPS 16(X_PTR)(IDX*4), X3 + MOVUPS 32(X_PTR)(IDX*4), X4 + MOVUPS 48(X_PTR)(IDX*4), X5 + + MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] + MULPS 16(Y_PTR)(IDX*4), X3 + MULPS 32(Y_PTR)(IDX*4), X4 + MULPS 48(Y_PTR)(IDX*4), X5 + + ADDPS X2, SUM // SUM += X_i + ADDPS X3, P_SUM + ADDPS X4, SUM + ADDPS X5, P_SUM + + ADDQ $16, IDX // IDX += 16 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDPS P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ TAIL, LEN // LEN = floor( TAIL / 4 ) + SHRQ $2, LEN + JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } + +dot_tail4_loop: // Loop unrolled 4x do { + MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] + ADDPS X2, SUM // SUM += X_i + ADDQ $4, IDX // i += 4 + DECQ LEN + JNZ dot_tail4_loop // } while --LEN > 0 + +dot_tail_start: // Reset loop counter for 1-wide tail loop + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ dot_end // if TAIL == 0 { return } + +dot_tail: // do { + MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] + MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] + ADDSS X2, SUM // psum += X2 + INCQ IDX // IDX++ + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + HADDPS_SUM_SUM // SUM = \sum{ SUM[i] } + HADDPS_SUM_SUM + MOVSS SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go new file mode 100644 index 00000000..2b336a2a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go @@ -0,0 +1,15 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f32 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
+func Ger(m, n uintptr, alpha float32, + x []float32, incX uintptr, + y []float32, incY uintptr, + a []float32, lda uintptr) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s new file mode 100644 index 00000000..e5e80c52 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s @@ -0,0 +1,757 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 4 +#define BITSIZE 2 +#define KERNELSIZE 3 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define Y y_base+56(FP) +#define Y_PTR DX +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X0 +#define ALPHA_SPILL al-16(SP) + +#define LOAD_ALPHA \ + MOVSS alpha+16(FP), ALPHA \ + SHUFPS $0, ALPHA, ALPHA + +#define LOAD_SCALED4 \ + PREFETCHNTA 16*SIZE(X_PTR) \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP 2*SIZE(X_PTR), X3 \ + MOVSHDUP X1, X2 \ + MOVSHDUP X3, X4 \ + MOVSLDUP X1, X1 \ + MOVSLDUP X3, X3 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 \ + MULPS ALPHA, X3 \ + MULPS ALPHA, X4 + +#define LOAD_SCALED2 \ + MOVDDUP (X_PTR), X1 \ + MOVSHDUP X1, X2 \ + MOVSLDUP X1, X1 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 + +#define LOAD_SCALED1 \ + MOVSS (X_PTR), X1 \ + SHUFPS $0, X1, X1 \ + MULPS ALPHA, X1 + +#define LOAD_SCALED4_INC \ + PREFETCHNTA (X_PTR)(INC_X*8) \ + MOVSS (X_PTR), X1 \ + MOVSS (X_PTR)(INC_X*1), X2 \ + MOVSS (X_PTR)(INC_X*2), X3 \ + MOVSS (X_PTR)(INC3_X*1), X4 \ + SHUFPS $0, X1, X1 \ + SHUFPS $0, X2, X2 \ + SHUFPS $0, X3, X3 \ + SHUFPS $0, X4, X4 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 \ + MULPS ALPHA, X3 \ + MULPS ALPHA, X4 + +#define LOAD_SCALED2_INC \ + MOVSS (X_PTR), X1 \ + MOVSS (X_PTR)(INC_X*1), X2 \ + SHUFPS $0, X1, X1 \ + SHUFPS $0, X2, X2 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 + +#define KERNEL_LOAD8 \ + MOVUPS (Y_PTR), X5 \ + MOVUPS 4*SIZE(Y_PTR), X6 + +#define KERNEL_LOAD8_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + MOVSS (Y_PTR)(INC_Y*2), X7 \ + MOVSS (Y_PTR)(INC3_Y*1), X8 \ + UNPCKLPS X6, X5 \ + UNPCKLPS X8, X7 \ + MOVLHPS X7, X5 \ + LEAQ (Y_PTR)(INC_Y*4), Y_PTR \ + MOVSS (Y_PTR), X6 \ + MOVSS (Y_PTR)(INC_Y*1), X7 \ + MOVSS (Y_PTR)(INC_Y*2), X8 \ + MOVSS (Y_PTR)(INC3_Y*1), X9 \ + UNPCKLPS X7, X6 \ + UNPCKLPS X9, X8 \ + MOVLHPS X8, X6 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X5 + +#define KERNEL_LOAD4_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + MOVSS (Y_PTR)(INC_Y*2), X7 \ + MOVSS (Y_PTR)(INC3_Y*1), X8 \ + UNPCKLPS X6, X5 \ + UNPCKLPS X8, X7 \ + MOVLHPS X7, X5 + +#define KERNEL_LOAD2 \ + MOVSD (Y_PTR), X5 + +#define KERNEL_LOAD2_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + UNPCKLPS X6, X5 + +#define KERNEL_4x8 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MOVUPS X5, X9 \ + MOVUPS X6, X10 \ + MOVUPS X5, X11 \ + MOVUPS X6, X12 \ + MULPS X1, X5 \ + MULPS X1, X6 \ + MULPS X2, X7 \ + MULPS X2, X8 \ + MULPS X3, X9 \ + MULPS X3, X10 \ + MULPS X4, X11 \ + MULPS X4, X12 + +#define STORE_4x8 \ + MOVUPS ALPHA, ALPHA_SPILL \ + MOVUPS (A_PTR), X13 \ + ADDPS X13, X5 \ + MOVUPS 4*SIZE(A_PTR), X14 \ + ADDPS X14, X6 \ + MOVUPS (A_PTR)(LDA*1), X15 \ + ADDPS X15, X7 \ + MOVUPS 4*SIZE(A_PTR)(LDA*1), X0 \ + ADDPS X0, X8 \ + MOVUPS (A_PTR)(LDA*2), 
X13 \ + ADDPS X13, X9 \ + MOVUPS 4*SIZE(A_PTR)(LDA*2), X14 \ + ADDPS X14, X10 \ + MOVUPS (A_PTR)(LDA3*1), X15 \ + ADDPS X15, X11 \ + MOVUPS 4*SIZE(A_PTR)(LDA3*1), X0 \ + ADDPS X0, X12 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ + MOVUPS X9, (A_PTR)(LDA*2) \ + MOVUPS X10, 4*SIZE(A_PTR)(LDA*2) \ + MOVUPS X11, (A_PTR)(LDA3*1) \ + MOVUPS X12, 4*SIZE(A_PTR)(LDA3*1) \ + MOVUPS ALPHA_SPILL, ALPHA \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_4x4 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPS X1, X5 \ + MULPS X2, X6 \ + MULPS X3, X7 \ + MULPS X4, X8 + +#define STORE_4x4 \ + MOVUPS (A_PTR), X13 \ + ADDPS X13, X5 \ + MOVUPS (A_PTR)(LDA*1), X14 \ + ADDPS X14, X6 \ + MOVUPS (A_PTR)(LDA*2), X15 \ + ADDPS X15, X7 \ + MOVUPS (A_PTR)(LDA3*1), X13 \ + ADDPS X13, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + MOVUPS X7, (A_PTR)(LDA*2) \ + MOVUPS X8, (A_PTR)(LDA3*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPS X1, X5 \ + MULPS X2, X6 \ + MULPS X3, X7 \ + MULPS X4, X8 + +#define STORE_4x2 \ + MOVSD (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVSD (A_PTR)(LDA*1), X10 \ + ADDPS X10, X6 \ + MOVSD (A_PTR)(LDA*2), X11 \ + ADDPS X11, X7 \ + MOVSD (A_PTR)(LDA3*1), X12 \ + ADDPS X12, X8 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + MOVSD X7, (A_PTR)(LDA*2) \ + MOVSD X8, (A_PTR)(LDA3*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVSS (Y_PTR), X5 \ + MOVSS X5, X6 \ + MOVSS X5, X7 \ + MOVSS X5, X8 \ + MULSS X1, X5 \ + MULSS X2, X6 \ + MULSS X3, X7 \ + MULSS X4, X8 + +#define STORE_4x1 \ + ADDSS (A_PTR), X5 \ + ADDSS (A_PTR)(LDA*1), X6 \ + ADDSS (A_PTR)(LDA*2), X7 \ + ADDSS (A_PTR)(LDA3*1), X8 \ + MOVSS X5, (A_PTR) \ + MOVSS X6, (A_PTR)(LDA*1) \ + MOVSS X7, (A_PTR)(LDA*2) \ + MOVSS X8, (A_PTR)(LDA3*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_2x8 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MULPS X1, X5 \ + MULPS X1, X6 \ + MULPS X2, X7 \ + MULPS X2, X8 + +#define STORE_2x8 \ + MOVUPS (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVUPS 4*SIZE(A_PTR), X10 \ + ADDPS X10, X6 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPS X11, X7 \ + MOVUPS 4*SIZE(A_PTR)(LDA*1), X12 \ + ADDPS X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_2x4 \ + MOVUPS X5, X6 \ + MULPS X1, X5 \ + MULPS X2, X6 + +#define STORE_2x4 \ + MOVUPS (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPS X11, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVSD X5, X6 \ + MULPS X1, X5 \ + MULPS X2, X6 + +#define STORE_2x2 \ + MOVSD (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVSD (A_PTR)(LDA*1), X8 \ + ADDPS X8, X6 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVSS (Y_PTR), X5 \ + MOVSS X5, X6 \ + MULSS X1, X5 \ + MULSS X2, X6 + +#define STORE_2x1 \ + ADDSS (A_PTR), X5 \ + ADDSS (A_PTR)(LDA*1), X6 \ + MOVSS X5, (A_PTR) \ + MOVSS X6, (A_PTR)(LDA*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x8 \ + MULPS X1, X5 \ + MULPS X1, X6 + +#define STORE_1x8 \ + MOVUPS (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVUPS 4*SIZE(A_PTR), X8 \ + ADDPS X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_1x4 \ + MULPS X1, X5 \ + MULPS X1, X6 + +#define STORE_1x4 \ + MOVUPS (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVUPS X5, (A_PTR) \ + ADDQ $4*SIZE, A_PTR + +#define 
KERNEL_1x2 \ + MULPS X1, X5 + +#define STORE_1x2 \ + MOVSD (A_PTR), X6 \ + ADDPS X6, X5 \ + MOVSD X5, (A_PTR) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSS (Y_PTR), X5 \ + MULSS X1, X5 + +#define STORE_1x1 \ + ADDSS (A_PTR), X5 \ + MOVSS X5, (A_PTR) \ + ADDQ $SIZE, A_PTR + +// func Ger(m, n uintptr, alpha float32, +// x []float32, incX uintptr, +// y []float32, incY uintptr, +// a []float32, lda uintptr) +TEXT ·Ger(SB), 0, $16-120 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + LOAD_ALPHA + + MOVQ x_base+24(FP), X_PTR + MOVQ y_base+56(FP), Y_PTR + MOVQ a_base+88(FP), A_ROW + MOVQ A_ROW, A_PTR + MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float32) + SHLQ $BITSIZE, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + + CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) + JNE inc + CMPQ incX+48(FP), $1 // Check for dense vector X (fast-path) + JNE inc + + SHRQ $2, M + JZ r2 + +r4: + + // LOAD 4 + LOAD_SCALED4 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r4c4 + +r4c8: + // 4x8 KERNEL + KERNEL_LOAD8 + KERNEL_4x8 + STORE_4x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r4c8 + +r4c4: + TESTQ $4, N_DIM + JZ r4c2 + + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE_4x4 + + ADDQ $4*SIZE, Y_PTR + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + STORE_4x2 + + ADDQ $2*SIZE, Y_PTR + +r4c1: + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ $SIZE, Y_PTR + +r4end: + ADDQ $4*SIZE, X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + LOAD_SCALED2 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r2c4 + +r2c8: + // 2x8 KERNEL + KERNEL_LOAD8 + KERNEL_2x8 + STORE_2x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r2c8 + +r2c4: + TESTQ $4, N_DIM + JZ r2c2 + + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + STORE_2x4 + + ADDQ $4*SIZE, Y_PTR + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE_2x2 + + ADDQ $2*SIZE, Y_PTR + +r2c1: + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ $SIZE, Y_PTR + +r2end: + ADDQ $2*SIZE, X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD_SCALED1 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r1c4 + +r1c8: + // 1x8 KERNEL + KERNEL_LOAD8 + KERNEL_1x8 + STORE_1x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r1c8 + +r1c4: + TESTQ $4, N_DIM + JZ r1c2 + + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + STORE_1x4 + + ADDQ $4*SIZE, Y_PTR + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + STORE_1x2 + + ADDQ $2*SIZE, Y_PTR + +r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + +end: + RET + +inc: // Algorithm for incY != 0 ( split loads in kernel ) + + MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float32) + SHLQ $BITSIZE, INC_X + MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float32) + SHLQ $BITSIZE, INC_Y + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_X, TMP1 + NEGQ TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + LOAD_SCALED4_INC + + MOVQ N_DIM, N + SHRQ 
$KERNELSIZE, N + JZ inc_r4c4 + +inc_r4c8: + // 4x4 KERNEL + KERNEL_LOAD8_INC + KERNEL_4x8 + STORE_4x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r4c8 + +inc_r4c4: + TESTQ $4, N_DIM + JZ inc_r4c2 + + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE_4x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + STORE_4x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r4c1: + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ INC_Y, Y_PTR + +inc_r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + LOAD_SCALED2_INC + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r2c4 + +inc_r2c8: + // 2x8 KERNEL + KERNEL_LOAD8_INC + KERNEL_2x8 + STORE_2x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r2c8 + +inc_r2c4: + TESTQ $4, N_DIM + JZ inc_r2c2 + + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + STORE_2x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE_2x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2c1: + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ INC_Y, Y_PTR + +inc_r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD_SCALED1 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r1c4 + +inc_r1c8: + // 1x8 KERNEL + KERNEL_LOAD8_INC + KERNEL_1x8 + STORE_1x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r1c8 + +inc_r1c4: + TESTQ $4, N_DIM + JZ inc_r1c2 + + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + STORE_1x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + STORE_1x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ inc_end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go new file mode 100644 index 00000000..d92f9968 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go @@ -0,0 +1,36 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f32 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
+func Ger(m, n uintptr, alpha float32, x []float32, incX uintptr, y []float32, incY uintptr, a []float32, lda uintptr) { + + if incX == 1 && incY == 1 { + x = x[:m] + y = y[:n] + for i, xv := range x { + AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) + } + return + } + + var ky, kx uintptr + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + + ix := kx + for i := 0; i < int(m); i++ { + AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], uintptr(n), uintptr(incY), 1, uintptr(ky), 0) + ix += incX + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go new file mode 100644 index 00000000..d0867a46 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go @@ -0,0 +1,55 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package f32 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float32, x []float32) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float32, alpha float32, x []float32) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float32, x []float32, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float32, incDst uintptr, alpha float32, x []float32, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go new file mode 100644 index 00000000..fcbce09e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go @@ -0,0 +1,68 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +package f32 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float32, x, y []float32) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) + +// DdotUnitary is +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return +func DdotUnitary(x, y []float32) (sum float64) + +// DdotInc is +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return +func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float32) (sum float32) + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go new file mode 100644 index 00000000..3b5b0970 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package f32 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float32, x, y []float32) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float32) (sum float32) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} + +// DdotUnitary is +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return +func DdotUnitary(x, y []float32) (sum float64) { + for i, v := range x { + sum += float64(y[i]) * float64(v) + } + return +} + +// DdotInc is +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return +func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { + for i := 0; i < int(n); i++ { + sum += float64(y[iy]) * float64(x[ix]) + ix += incX + iy += incY + } + return +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s new file mode 100644 index 00000000..d9d61bb7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s @@ -0,0 +1,82 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1Norm(x []float64) float64 +TEXT ·L1Norm(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ x_len+8(FP), CX // CX = len(x) + XORQ AX, AX // i = 0 + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVUPS (SI)(AX*8), X8 // X_i = x[i:i+1] + MOVUPS 16(SI)(AX*8), X9 + MOVUPS 32(SI)(AX*8), X10 + MOVUPS 48(SI)(AX*8), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + ADDQ $8, AX // i += 8 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI)(AX*8), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + INCQ AX // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+24(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s new file mode 100644 index 00000000..cac19aa6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s @@ -0,0 +1,90 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1NormInc(x []float64, n, incX int) (sum float64) +TEXT ·L1NormInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ n+24(FP), CX // CX = n + MOVQ incX+32(FP), AX // AX = increment * sizeof( float64 ) + SHLQ $3, AX + MOVQ AX, DX // DX = AX * 3 + IMULQ $3, DX + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = n % 8 + SHRQ $3, CX // CX = floor( n / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum = max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_i[0] = x[i] + MOVSD (SI)(AX*1), X9 + MOVSD (SI)(AX*2), X10 + MOVSD (SI)(DX*1), X11 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + MOVHPD (SI), X8 // X_i[1] = x[i+4] + MOVHPD (SI)(AX*1), X9 + MOVHPD (SI)(AX*2), X10 + MOVHPD (SI)(DX*1), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + ADDQ AX, SI // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+40(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s new file mode 100644 index 00000000..bc0ea6a4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s @@ -0,0 +1,66 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Add(dst, s []float64) +TEXT ·Add(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE add_end + XORQ AX, AX + MOVQ DI, BX + ANDQ $0x0F, BX // BX = &dst & 15 + JZ add_no_trim // if BX == 0 { goto add_no_trim } + + // Align on 16-bit boundary + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // i++ + DECQ CX // --CX + JE add_end // if CX == 0 { return } + +add_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ add_tail_start // if CX == 0 { goto add_tail_start } + +add_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD (DI)(AX*8), X0 // X_i += dst[i:i+1] + ADDPD 16(DI)(AX*8), X1 + ADDPD 32(DI)(AX*8), X2 + ADDPD 48(DI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X_i + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP add_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE add_end + +add_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +add_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP add_tail // } while --CX > 0 + +add_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s new file mode 100644 index 00000000..7cc68c78 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s @@ -0,0 +1,53 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Addconst(alpha float64, x []float64) +TEXT ·AddConst(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ x_len+16(FP), CX // CX = len(x) + CMPQ CX, $0 // if len(x) == 0 { return } + JE ac_end + MOVSD alpha+0(FP), X4 // X4 = { a, a } + SHUFPD $0, X4, X4 + MOVUPS X4, X5 // X5 = X4 + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ ac_tail_start // if CX == 0 { goto ac_tail_start } + +ac_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD X4, X0 // X_i += a + ADDPD X5, X1 + ADDPD X4, X2 + ADDPD X5, X3 + MOVUPS X0, (SI)(AX*8) // s[i:i+1] = X_i + MOVUPS X1, 16(SI)(AX*8) + MOVUPS X2, 32(SI)(AX*8) + MOVUPS X3, 48(SI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP ac_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE ac_end + +ac_tail_start: // Reset loop counters + MOVQ BX, CX // Loop counter: CX = BX + +ac_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD X4, X0 // X0 += a + MOVSD X0, (SI)(AX*8) // s[i] = X0 + INCQ AX // ++i + LOOP ac_tail // } while --CX > 0 + +ac_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go new file mode 100644 index 00000000..b8322139 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go @@ -0,0 +1,57 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s new file mode 100644 index 00000000..95fe9f90 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s @@ -0,0 +1,142 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R9 +#define INCx3_DST R12 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR = &y + MOVQ n+56(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+80(FP), INC_X + MOVQ iy+88(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy]) + MOVQ Y_PTR, DST_PTR // DST_PTR = Y_PTR // Write pointer + + MOVQ incX+64(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+72(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVAPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +loop: // do { // y[i] += alpha * x[i] unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + // y[i] += alpha * x[i] for the last n % 4 iterations. + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s new file mode 100644 index 00000000..dcb79d87 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s @@ -0,0 +1,148 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DX +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R10 +#define INCx3_DST R13 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+48(FP), X_PTR // X_PTR := &x + MOVQ y_base+72(FP), Y_PTR // Y_PTR := &y + MOVQ n+96(FP), LEN // LEN := n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+120(FP), INC_X + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + MOVQ iy+128(FP), INC_Y + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(dst[idst]) + MOVQ idst+32(FP), INC_DST + LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(y[iy]) + + MOVQ incX+104(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+112(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst * sizeof(float64) + SHLQ $3, INC_DST + MOVSD alpha+40(FP), ALPHA + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVSD ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // y[i] += alpha * x[i] unrolled 2x. 
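+	// Each pass handles four strided elements of x and y
+	// (LEN = floor(n/4), TAIL = n%4) and writes the results
+	// through DST_PTR at stride INC_DST.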
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4] + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incY*2] + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s new file mode 100644 index 00000000..bc290a15 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitary(alpha float64, x, y []float64) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR := &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+16(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+40(FP), LEN + CMOVQLE y_len+40(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + XORQ IDX, IDX + MOVSD alpha+0(FP), ALPHA // ALPHA := { alpha, alpha } + SHUFPD $0, ALPHA, ALPHA + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail2_start } + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= a + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if TAIL == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= a + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s new file mode 100644 index 00000000..16798eba --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DX +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+32(FP), X_PTR // X_PTR := &x + MOVQ y_base+56(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+40(FP), LEN // LEN = min( len(x), len(y), len(dst) ) + CMPQ y_len+64(FP), LEN + CMOVQLE y_len+64(FP), LEN + CMPQ dst_len+8(FP), LEN + CMOVQLE dst_len+8(FP), LEN + + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVSD alpha+24(FP), ALPHA + SHUFPD $0, ALPHA, ALPHA // ALPHA := { alpha, alpha } + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. 
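+	// DST_PTR addresses dst (not y), so each pass computes
+	// dst[i:i+8] = alpha*x[i:i+8] + y[i:i+8], eight float64s at a time.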
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= alpha + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= alpha + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s new file mode 100644 index 00000000..32bd1572 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s @@ -0,0 +1,71 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +TEXT ·CumProd(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cp_end + XORQ AX, AX // i = 0 + + MOVSD (SI), X5 // p_prod = { s[0], s[0] } + SHUFPD $0, X5, X5 + MOVSD X5, (DI) // dst[0] = s[0] + INCQ AX // ++i + DECQ CX // -- CX + JZ cp_end // if CX == 0 { return } + + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cp_tail_start // if CX == 0 { goto cp_tail_start } + +cp_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + MULPD X0, X1 // X1 *= X0 + MULPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + MULPD X5, X0 // X0 *= p_prod + MULPD X1, X5 // p_prod *= X1 + MULPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + MULPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cp_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cp_end + +cp_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cp_tail: // do { + MULSD (SI)(AX*8), X5 // p_prod *= s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_prod + INCQ AX // ++i + LOOP cp_tail // } while --CX > 0 + +cp_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git 
a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s new file mode 100644 index 00000000..10d7fdab --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s @@ -0,0 +1,64 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +TEXT ·CumSum(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cs_end + XORQ AX, AX // i = 0 + PXOR X5, X5 // p_sum = 0 + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cs_tail_start // if CX == 0 { goto cs_tail_start } + +cs_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + ADDPD X0, X1 // X1 += X0 + ADDPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + ADDPD X5, X0 // X0 += p_sum + ADDPD X1, X5 // p_sum += X1 + ADDPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + ADDPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cs_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cs_end + +cs_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cs_tail: // do { + ADDSD (SI)(AX*8), X5 // p_sum *= s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_sum + INCQ AX // ++i + LOOP cs_tail // } while --CX > 0 + +cs_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s new file mode 100644 index 00000000..1a4e9eec --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s @@ -0,0 +1,67 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Div(dst, s []float64) +TEXT ·Div(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ SI, BX + ANDQ $15, BX // BX = &s & 15 + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-bit boundary + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (DI)(AX*8), X0 // X0 = dst[i:i+1] + MOVUPS 16(DI)(AX*8), X1 + MOVUPS 32(DI)(AX*8), X2 + MOVUPS 48(DI)(AX*8), X3 + DIVPD (SI)(AX*8), X0 // X0 /= s[i:i+1] + DIVPD 16(SI)(AX*8), X1 + DIVPD 32(SI)(AX*8), X2 + DIVPD 48(SI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP div_tail // } while --CX > 0 + +div_end: + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s new file mode 100644 index 00000000..16ab9b7e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s @@ -0,0 +1,73 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func DivTo(dst, x, y []float64) +TEXT ·DivTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ x_base+24(FP), SI // SI = &x + MOVQ y_base+48(FP), DX // DX = &y + CMPQ x_len+32(FP), CX // CX = max( len(dst), len(x), len(y) ) + CMOVQLE x_len+32(FP), CX + CMPQ y_len+56(FP), CX + CMOVQLE y_len+56(FP), CX + MOVQ CX, ret_len+80(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ DX, BX + ANDQ $15, BX // BX = &y & OxF + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-bit boundary + MOVSD (SI)(AX*8), X0 // X0 = s[i] + DIVSD (DX)(AX*8), X0 // X0 /= t[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X0 = x[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + DIVPD (DX)(AX*8), X0 // X0 /= y[i:i+1] + DIVPD 16(DX)(AX*8), X1 + DIVPD 32(DX)(AX*8), X2 + DIVPD 48(DX)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = x[i] + DIVSD (DX)(AX*8), X0 // X0 /= y[i] + MOVSD X0, (DI)(AX*8) + INCQ AX // ++i + LOOP div_tail // } while --CX > 0 + +div_end: + MOVQ DI, ret_base+72(FP) // &ret = &dst + MOVQ dst_cap+16(FP), DI // cap(ret) = cap(dst) + MOVQ DI, ret_cap+88(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go new file mode 100644 index 00000000..33c76c1e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f64 provides float64 vector primitives. +package f64 // import "gonum.org/v1/gonum/internal/asm/f64" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go new file mode 100644 index 00000000..b77138d1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go @@ -0,0 +1,35 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package f64 + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float64) (sum float64) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s new file mode 100644 index 00000000..eff25059 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s @@ -0,0 +1,145 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func DdotUnitary(x, y []float64) (sum float64) +// This function assumes len(y) >= len(x). +TEXT ·DotUnitary(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ x_len+8(FP), DI // n = len(x) + MOVQ y+24(FP), R9 + + MOVSD $(0.0), X7 // sum = 0 + MOVSD $(0.0), X8 // sum = 0 + + MOVQ $0, SI // i = 0 + SUBQ $4, DI // n -= 4 + JL tail_uni // if n < 0 goto tail_uni + +loop_uni: + // sum += x[i] * y[i] unrolled 4x. 
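+	// X7 and X8 are packed accumulators (two partial sums each);
+	// they are reduced to a single scalar after the loop.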
+ MOVUPD 0(R8)(SI*8), X0 + MOVUPD 0(R9)(SI*8), X1 + MOVUPD 16(R8)(SI*8), X2 + MOVUPD 16(R9)(SI*8), X3 + MULPD X1, X0 + MULPD X3, X2 + ADDPD X0, X7 + ADDPD X2, X8 + + ADDQ $4, SI // i += 4 + SUBQ $4, DI // n -= 4 + JGE loop_uni // if n >= 0 goto loop_uni + +tail_uni: + ADDQ $4, DI // n += 4 + JLE end_uni // if n <= 0 goto end_uni + +onemore_uni: + // sum += x[i] * y[i] for the remaining 1-3 elements. + MOVSD 0(R8)(SI*8), X0 + MOVSD 0(R9)(SI*8), X1 + MULSD X1, X0 + ADDSD X0, X7 + + ADDQ $1, SI // i++ + SUBQ $1, DI // n-- + JNZ onemore_uni // if n != 0 goto onemore_uni + +end_uni: + // Add the four sums together. + ADDPD X8, X7 + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+48(FP) // Return final sum. + RET + +// func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) +TEXT ·DotInc(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ y+24(FP), R9 + MOVQ n+48(FP), CX + MOVQ incX+56(FP), R11 + MOVQ incY+64(FP), R12 + MOVQ ix+72(FP), R13 + MOVQ iy+80(FP), R14 + + MOVSD $(0.0), X7 // sum = 0 + LEAQ (R8)(R13*8), SI // p = &x[ix] + LEAQ (R9)(R14*8), DI // q = &y[ix] + SHLQ $3, R11 // incX *= sizeof(float64) + SHLQ $3, R12 // indY *= sizeof(float64) + + SUBQ $2, CX // n -= 2 + JL tail_inc // if n < 0 goto tail_inc + +loop_inc: + // sum += *p * *q unrolled 2x. + MOVHPD (SI), X0 + MOVHPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + MOVLPD (SI), X0 + MOVLPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + + MULPD X1, X0 + ADDPD X0, X7 + + SUBQ $2, CX // n -= 2 + JGE loop_inc // if n >= 0 goto loop_inc + +tail_inc: + ADDQ $2, CX // n += 2 + JLE end_inc // if n <= 0 goto end_inc + + // sum += *p * *q for the last iteration if n is odd. + MOVSD (SI), X0 + MULSD (DI), X0 + ADDSD X0, X7 + +end_inc: + // Add the two sums together. + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+88(FP) // Return final sum. + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go new file mode 100644 index 00000000..00c99e93 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go @@ -0,0 +1,22 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f64 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) + +// GemvN computes +// y = alpha * A * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) + +// GemvT computes +// y = alpha * A^T * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go new file mode 100644 index 00000000..2a1cfd5c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go @@ -0,0 +1,118 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) { + if incX == 1 && incY == 1 { + x = x[:m] + y = y[:n] + for i, xv := range x { + AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) + } + return + } + + var ky, kx uintptr + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + + ix := kx + for i := 0; i < int(m); i++ { + AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], n, incY, 1, ky, 0) + ix += incX + } +} + +// GemvN computes +// y = alpha * A * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(n-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(m-1) * int(incY)) + } + + if incX == 1 && incY == 1 { + if beta == 0 { + for i = 0; i < m; i++ { + y[i] = alpha * DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + for i = 0; i < m; i++ { + y[i] = y[i]*beta + alpha*DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + iy := ky + if beta == 0 { + for i = 0; i < m; i++ { + y[iy] = alpha * DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } + return + } + for i = 0; i < m; i++ { + y[iy] = y[iy]*beta + alpha*DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } +} + +// GemvT computes +// y = alpha * A^T * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + switch { + case beta == 0: // beta == 0 is special-cased to memclear + if incY == 1 { + for i := range y { + y[i] = 0 + } + } else { + iy := ky + for i := 0; i < int(n); i++ { + y[iy] = 0 + iy += incY + } + } + case int(incY) < 0: + ScalInc(beta, y, n, uintptr(int(-incY))) + case incY == 1: + ScalUnitary(beta, y[:n]) + default: + ScalInc(beta, y, n, incY) + } + + if incX == 1 && incY == 1 { + for i = 0; i < m; i++ { + AxpyUnitaryTo(y, alpha*x[i], a[lda*i:lda*i+n], y) + } + return + } + ix := kx + for i = 0; i < m; i++ { + AxpyInc(alpha*x[ix], a[lda*i:lda*i+n], y, n, 1, incY, 0, ky) + ix += incX + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s new file mode 100644 index 00000000..8d2a6a71 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s @@ -0,0 +1,685 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define INC_X R8 +#define INC3_X R9 + +#define Y_PTR DX +#define Y y_base+96(FP) +#define INC_Y R10 +#define INC3_Y R11 + +#define A_ROW AX +#define A_PTR DI +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + XORPS X0, X0 \ + XORPS X1, X1 \ + XORPS X2, X2 \ + XORPS X3, X3 + +#define INIT2 \ + XORPS X0, X0 \ + XORPS X1, X1 + +#define INIT1 \ + XORPS X0, X0 + +#define KERNEL_LOAD4 \ + MOVUPS (X_PTR), X12 \ + MOVUPS 2*SIZE(X_PTR), X13 + +#define KERNEL_LOAD2 \ + MOVUPS (X_PTR), X12 + +#define KERNEL_LOAD4_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 \ + MOVSD (X_PTR)(INC_X*2), X13 \ + MOVHPD (X_PTR)(INC3_X*1), X13 + +#define KERNEL_LOAD2_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MOVUPS (A_PTR)(LDA*2), X8 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X9 \ + MOVUPS (A_PTR)(LDA3*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X11 \ + MULPD X12, X4 \ + MULPD X13, X5 \ + MULPD X12, X6 \ + MULPD X13, X7 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X1 \ + ADDPD X7, X1 \ + ADDPD X8, X2 \ + ADDPD X9, X2 \ + ADDPD X10, X3 \ + ADDPD X11, X3 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + MULPD X12, X6 \ + MULPD X12, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X2 \ + ADDPD X7, X3 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X4 \ + MOVHPD (A_PTR)(LDA*1), X4 \ + MOVSD (A_PTR)(LDA*2), X5 \ + MOVHPD (A_PTR)(LDA3*1), X5 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X2 \ + ADDQ $SIZE, A_PTR + +#define STORE4 \ + MOVUPS (Y_PTR), X4 \ + MOVUPS 2*SIZE(Y_PTR), X5 \ + MULPD ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVUPS X4, (Y_PTR) \ + MOVUPS X5, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD (Y_PTR)(INC_Y*1), X4 \ + MOVSD (Y_PTR)(INC_Y*2), X5 \ + MOVHPD (Y_PTR)(INC3_Y*1), X5 \ + MULPD ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVLPD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) \ + MOVLPD X5, (Y_PTR)(INC_Y*2) \ + MOVHPD X5, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X11 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X8, X0 \ + ADDPD X10, X1 \ + ADDPD X9, X0 \ + ADDPD X11, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X8 \ + MOVUPS (A_PTR)(LDA*1), X9 \ + MULPD X12, X8 \ + MULPD X12, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X1 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MOVHPD (A_PTR)(LDA*1), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE2 \ + MOVUPS (Y_PTR), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVUPS X4, (Y_PTR) + +#define STORE2_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD 
(Y_PTR)(INC_Y*1), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVSD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X0 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVUPS (A_PTR), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MULSD X12, X8 \ + ADDSD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE1 \ + HADDPD X0, X0 \ + MOVSD (Y_PTR), X4 \ + MULSD ALPHA, X0 \ + MULSD BETA, X4 \ + ADDSD X0, X4 \ + MOVSD X4, (Y_PTR) + +// func GemvN(m, n int, +// alpha float64, +// a []float64, lda int, +// x []float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvN(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + MOVDDUP beta+88(FP), BETA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ incY+120(FP), INC_Y + MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + MOVQ Y_PTR, Y + + SHLQ $3, INC_Y // INC_Y = incY * sizeof(float64) + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + MOVSD $0.0, X0 + COMISD BETA, X0 + JNE gemv_start // if beta != 0 { goto gemv_start } + +gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + CMPQ incY+120(FP), $1 // Check for dense vector X (fast-path) + JNE inc_clear + + SHRQ $3, M + JZ clear4 + +clear8: + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + MOVUPS X2, 32(Y_PTR) + MOVUPS X3, 48(Y_PTR) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ clear8 + +clear4: + TESTQ $4, M_DIM + JZ clear2 + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + ADDQ $4*SIZE, Y_PTR + +clear2: + TESTQ $2, M_DIM + JZ clear1 + MOVUPS X0, (Y_PTR) + ADDQ $2*SIZE, Y_PTR + +clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + + JMP prep_end + +inc_clear: + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + +prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +gemv_start: + CMPQ incX+80(FP), $1 // Check for dense vector X (fast-path) + JNE inc + + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r4c4 + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + + ADDQ $2*SIZE, X_PTR + +r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ $SIZE, X_PTR + +r4end: + CMPQ INC_Y, $SIZE + JNZ r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP r4inc + +r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N 
+ JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + + ADDQ $2*SIZE, X_PTR + +r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ $SIZE, X_PTR + +r2end: + CMPQ INC_Y, $SIZE + JNE r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP r2inc + +r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + + ADDQ $2*SIZE, X_PTR + +r1c1: + + TESTQ $1, N_DIM + JZ r1end + + // 1x1 KERNEL + KERNEL_1x1 + +r1end: + STORE1 + +end: + RET + +inc: // Algorithm for incX != 1 ( split loads in kernel ) + MOVQ incX+80(FP), INC_X // INC_X = incX + + XORQ TMP2, TMP2 // TMP2 = 0 + MOVQ N, TMP1 // TMP1 = N + SUBQ $1, TMP1 // TMP1 -= 1 + NEGQ TMP1 // TMP1 = -TMP1 + IMULQ INC_X, TMP1 // TMP1 *= INC_X + CMPQ INC_X, $0 // if INC_X < 0 { TMP2 = TMP1 } + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR // X_PTR = X_PTR[TMP2] + MOVQ X_PTR, X // X = X_PTR + + SHLQ $3, INC_X + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + + DECQ N + JNZ inc_r4c4 + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ INC_X, X_PTR + +inc_r4end: + CMPQ INC_Y, $SIZE + JNE inc_r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP inc_r4inc + +inc_r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ INC_X, X_PTR + +inc_r2end: + CMPQ INC_Y, $SIZE + JNE inc_r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP inc_r2inc + +inc_r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ inc_r1end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_r1end: + STORE1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s new file mode 100644 index 00000000..ff7d60fb --- 
/dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s @@ -0,0 +1,745 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM n+8(FP) +#define M CX +#define N_DIM m+0(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define Y_PTR DX +#define Y y_base+96(FP) +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MOVDDUP (X_PTR)(INC_X*2), X10 \ + MOVDDUP (X_PTR)(INC3_X*1), X11 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 \ + MULPD ALPHA, X10 \ + MULPD ALPHA, X11 + +#define INIT2 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 + +#define INIT1 \ + MOVDDUP (X_PTR), X8 \ + MULPD ALPHA, X8 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X0 \ + MOVUPS 2*SIZE(Y_PTR), X1 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X0 + +#define KERNEL_LOAD4_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD (Y_PTR)(INC_Y*1), X0 \ + MOVSD (Y_PTR)(INC_Y*2), X1 \ + MOVHPD (Y_PTR)(INC3_Y*1), X1 + +#define KERNEL_LOAD2_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD (Y_PTR)(INC_Y*1), X0 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + MOVUPS (A_PTR)(LDA*2), X4 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X5 \ + MOVUPS (A_PTR)(LDA3*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X7 \ + MULPD X10, X4 \ + MULPD X10, X5 \ + MULPD X11, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDQ $4*SIZE, A_PTR + +#define STORE4 \ + MOVUPS X0, (Y_PTR) \ + MOVUPS X1, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) \ + MOVLPD X1, (Y_PTR)(INC_Y*2) \ + MOVHPD X1, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + MULPD X10, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X0 \ + ADDPD X7, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVUPS (A_PTR), X4 \ + MULPD X8, X4 \ + ADDPD X4, X0 \ + ADDQ $2*SIZE, A_PTR + +#define STORE2 \ + MOVUPS X0, (Y_PTR) + +#define STORE2_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MOVSD (A_PTR)(LDA*2), X6 \ + MOVSD 
(A_PTR)(LDA3*1), X7 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + MULSD X10, X6 \ + MULSD X11, X7 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + ADDSD X6, X0 \ + ADDSD X7, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MULSD X8, X4 \ + ADDSD X4, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define SCALE_8(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MOVUPS 32(PTR), X2 \ + MOVUPS 48(PTR), X3 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MULPD SCAL, X2 \ + MULPD SCAL, X3 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + MOVUPS X2, 32(PTR) \ + MOVUPS X3, 48(PTR) + +#define SCALE_4(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + +#define SCALE_2(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MULPD SCAL, X0 \ + MOVUPS X0, (PTR) \ + +#define SCALE_1(PTR, SCAL) \ + MOVSD (PTR), X0 \ + MULSD SCAL, X0 \ + MOVSD X0, (PTR) \ + +#define SCALEINC_4(PTR, INC, INC3, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MOVSD (PTR)(INC*2), X2 \ + MOVSD (PTR)(INC3*1), X3 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MULSD SCAL, X2 \ + MULSD SCAL, X3 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) \ + MOVSD X2, (PTR)(INC*2) \ + MOVSD X3, (PTR)(INC3*1) + +#define SCALEINC_2(PTR, INC, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) + +// func GemvT(m, n int, +// alpha float64, +// a []float64, lda int, +// x []float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvT(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ incY+120(FP), INC_Y // INC_Y = incY * sizeof(float64) + MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + MOVQ A_ROW, A_PTR + + MOVQ incX+80(FP), INC_X // INC_X = incX * sizeof(float64) + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + NEGQ TMP1 + IMULQ INC_X, TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + MOVQ X_PTR, X + + SHLQ $3, INC_X + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + + CMPQ incY+120(FP), $1 // Check for dense vector Y (fast-path) + JNE inc + + MOVSD $1.0, X0 + COMISD beta+88(FP), X0 + JE gemv_start + + MOVSD $0.0, X0 + COMISD beta+88(FP), X0 + JE gemv_clear + + MOVDDUP beta+88(FP), BETA + SHRQ $3, M + JZ scal4 + +scal8: + SCALE_8(Y_PTR, BETA) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ scal8 + +scal4: + TESTQ $4, M_DIM + JZ scal2 + SCALE_4(Y_PTR, BETA) + ADDQ $4*SIZE, Y_PTR + +scal2: + TESTQ $2, M_DIM + JZ scal1 + SCALE_2(Y_PTR, BETA) + ADDQ $2*SIZE, Y_PTR + +scal1: + TESTQ $1, M_DIM + JZ prep_end + SCALE_1(Y_PTR, BETA) + + JMP prep_end + +gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + SHRQ $3, M + JZ clear4 + +clear8: + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + MOVUPS X2, 32(Y_PTR) + MOVUPS X3, 48(Y_PTR) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ clear8 + +clear4: + TESTQ $4, M_DIM + JZ clear2 + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + ADDQ $4*SIZE, Y_PTR + +clear2: + 
TESTQ $2, M_DIM + JZ clear1 + MOVUPS X0, (Y_PTR) + ADDQ $2*SIZE, Y_PTR + +clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + +prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +gemv_start: + SHRQ $2, N + JZ c2 + +c4: + // LOAD 4 + INIT4 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c4r2 + +c4r4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c4r4 + +c4r2: + TESTQ $2, M_DIM + JZ c4r1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x4 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c4r1: + TESTQ $1, M_DIM + JZ c4end + + // 4x1 KERNEL + KERNEL_1x4 + + ADDQ $SIZE, Y_PTR + +c4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ N + JNZ c4 + +c2: + TESTQ $2, N_DIM + JZ c1 + + // LOAD 2 + INIT2 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c2r2 + +c2r4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x2 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c2r4 + +c2r2: + TESTQ $2, M_DIM + JZ c2r1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c2r1: + TESTQ $1, M_DIM + JZ c2end + + // 2x1 KERNEL + KERNEL_1x2 + + ADDQ $SIZE, Y_PTR + +c2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +c1: + TESTQ $1, N_DIM + JZ end + + // LOAD 1 + INIT1 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c1r2 + +c1r4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x1 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c1r4 + +c1r2: + TESTQ $2, M_DIM + JZ c1r1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x1 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c1r1: + TESTQ $1, M_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + +end: + RET + +inc: // Algorithm for incX != 0 ( split loads in kernel ) + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + MOVQ Y_PTR, Y + + SHLQ $3, INC_Y + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + MOVSD $1.0, X0 + COMISD beta+88(FP), X0 + JE inc_gemv_start + + MOVSD $0.0, X0 + COMISD beta+88(FP), X0 + JE inc_gemv_clear + + MOVDDUP beta+88(FP), BETA + SHRQ $2, M + JZ inc_scal2 + +inc_scal4: + SCALEINC_4(Y_PTR, INC_Y, INC3_Y, BETA) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_scal4 + +inc_scal2: + TESTQ $2, M_DIM + JZ inc_scal1 + + SCALEINC_2(Y_PTR, INC_Y, BETA) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_scal1: + TESTQ $1, M_DIM + JZ inc_prep_end + SCALE_1(Y_PTR, BETA) + + JMP inc_prep_end + +inc_gemv_clear: // beta == 0 is special-cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ inc_prep_end + MOVSD X0, (Y_PTR) + +inc_prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +inc_gemv_start: + SHRQ $2, N + JZ inc_c2 + +inc_c4: + // LOAD 4 + INIT4 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c4r2 + +inc_c4r4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + + DECQ M + JNZ inc_c4r4 + +inc_c4r2: + TESTQ $2, M_DIM + JZ inc_c4r1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x4 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c4r1: + TESTQ $1, M_DIM + JZ inc_c4end + + // 4x1 KERNEL + KERNEL_1x4 + + ADDQ INC_Y, Y_PTR + 
+inc_c4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ N + JNZ inc_c4 + +inc_c2: + TESTQ $2, N_DIM + JZ inc_c1 + + // LOAD 2 + INIT2 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c2r2 + +inc_c2r4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x2 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c2r4 + +inc_c2r2: + TESTQ $2, M_DIM + JZ inc_c2r1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c2r1: + TESTQ $1, M_DIM + JZ inc_c2end + + // 2x1 KERNEL + KERNEL_1x2 + + ADDQ INC_Y, Y_PTR + +inc_c2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_c1: + TESTQ $1, N_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c1r2 + +inc_c1r4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x1 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c1r4 + +inc_c1r2: + TESTQ $2, M_DIM + JZ inc_c1r1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x1 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c1r1: + TESTQ $1, M_DIM + JZ inc_end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s new file mode 100644 index 00000000..8c1b36a6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s @@ -0,0 +1,591 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define Y y_base+56(FP) +#define Y_PTR DX +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X0 + +#define LOAD4 \ + PREFETCHNTA (X_PTR )(INC_X*8) \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MOVDDUP (X_PTR)(INC_X*2), X3 \ + MOVDDUP (X_PTR)(INC3_X*1), X4 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 \ + MULPD ALPHA, X3 \ + MULPD ALPHA, X4 + +#define LOAD2 \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 + +#define LOAD1 \ + MOVDDUP (X_PTR), X1 \ + MULPD ALPHA, X1 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X5 \ + MOVUPS 2*SIZE(Y_PTR), X6 + +#define KERNEL_LOAD4_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD (Y_PTR)(INC_Y*1), X5 \ + MOVLPD (Y_PTR)(INC_Y*2), X6 \ + MOVHPD (Y_PTR)(INC3_Y*1), X6 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X5 + +#define KERNEL_LOAD2_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD (Y_PTR)(INC_Y*1), X5 + +#define KERNEL_4x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MOVUPS X5, X9 \ + MOVUPS X6, X10 \ + MOVUPS X5, X11 \ + MOVUPS X6, X12 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 \ + MULPD X3, X9 \ + MULPD X3, X10 \ + MULPD X4, X11 \ + MULPD X4, X12 + +#define STORE_4x4 \ + MOVUPS (A_PTR), X13 \ + ADDPD X13, X5 \ + MOVUPS 2*SIZE(A_PTR), X14 \ + ADDPD X14, X6 \ + MOVUPS (A_PTR)(LDA*1), X15 \ + ADDPD X15, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X0 \ + ADDPD X0, X8 \ + MOVUPS (A_PTR)(LDA*2), X13 \ + ADDPD X13, X9 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X14 \ + ADDPD X14, X10 \ + MOVUPS (A_PTR)(LDA3*1), X15 \ + ADDPD X15, X11 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X0 \ + ADDPD X0, X12 \ + MOVUPS X5, 
(A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ + MOVUPS X9, (A_PTR)(LDA*2) \ + MOVUPS X10, 2*SIZE(A_PTR)(LDA*2) \ + MOVUPS X11, (A_PTR)(LDA3*1) \ + MOVUPS X12, 2*SIZE(A_PTR)(LDA3*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPD X1, X5 \ + MULPD X2, X6 \ + MULPD X3, X7 \ + MULPD X4, X8 + +#define STORE_4x2 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*2), X11 \ + ADDPD X11, X7 \ + MOVUPS (A_PTR)(LDA3*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + MOVUPS X7, (A_PTR)(LDA*2) \ + MOVUPS X8, (A_PTR)(LDA3*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MOVSD X5, X7 \ + MOVSD X5, X8 \ + MULSD X1, X5 \ + MULSD X2, X6 \ + MULSD X3, X7 \ + MULSD X4, X8 + +#define STORE_4x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + ADDSD (A_PTR)(LDA*2), X7 \ + ADDSD (A_PTR)(LDA3*1), X8 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + MOVSD X7, (A_PTR)(LDA*2) \ + MOVSD X8, (A_PTR)(LDA3*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_2x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 + +#define STORE_2x4 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS 2*SIZE(A_PTR), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPD X11, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS X5, X6 \ + MULPD X1, X5 \ + MULPD X2, X6 + +#define STORE_2x2 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS (A_PTR)(LDA*1), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MULSD X1, X5 \ + MULSD X2, X6 + +#define STORE_2x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x4 \ + MULPD X1, X5 \ + MULPD X1, X6 + +#define STORE_1x4 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS 2*SIZE(A_PTR), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MULPD X1, X5 + +#define STORE_1x2 \ + MOVUPS (A_PTR), X6 \ + ADDPD X6, X5 \ + MOVUPS X5, (A_PTR) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X5 \ + MULSD X1, X5 + +#define STORE_1x1 \ + ADDSD (A_PTR), X5 \ + MOVSD X5, (A_PTR) \ + ADDQ $SIZE, A_PTR + +// func Ger(m, n uintptr, alpha float64, +// x []float64, incX uintptr, +// y []float64, incY uintptr, +// a []float64, lda uintptr) +TEXT ·Ger(SB), NOSPLIT, $0 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+24(FP), X_PTR + MOVQ y_base+56(FP), Y_PTR + MOVQ a_base+88(FP), A_ROW + MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_X, TMP1 + NEGQ TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + + CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) + JG inc + JL end 
+ + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE_4x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + STORE_4x2 + + ADDQ $2*SIZE, Y_PTR + +r4c1: + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ $SIZE, Y_PTR + +r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + STORE_2x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE_2x2 + + ADDQ $2*SIZE, Y_PTR + +r2c1: + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ $SIZE, Y_PTR + +r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + STORE_1x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + STORE_1x2 + + ADDQ $2*SIZE, Y_PTR + +r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ $SIZE, Y_PTR + +end: + RET + +inc: // Algorithm for incY != 1 ( split loads in kernel ) + + MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE_4x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + STORE_4x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r4c1: + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ INC_Y, Y_PTR + +inc_r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + STORE_2x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE_2x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2c1: + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ INC_Y, Y_PTR + +inc_r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + STORE_1x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 
KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + STORE_1x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ INC_Y, Y_PTR + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s new file mode 100644 index 00000000..f87f856c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s @@ -0,0 +1,58 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1Dist(s, t []float64) float64 +TEXT ·L1Dist(SB), NOSPLIT, $0 + MOVQ s_base+0(FP), DI // DI = &s + MOVQ t_base+24(FP), SI // SI = &t + MOVQ s_len+8(FP), CX // CX = len(s) + CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) + CMOVQLE t_len+32(FP), CX + PXOR X3, X3 // norm = 0 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE l1_end + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $1, BX // BX = CX % 2 + SHRQ $1, CX // CX = floor( CX / 2 ) + JZ l1_tail_start // if CX == 0 { return 0 } + +l1_loop: // Loop unrolled 2x do { + MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] + MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] + MOVAPS X0, X2 + SUBPD X1, X0 + SUBPD X2, X1 + MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + ADDPD X0, X3 // norm += X0 + ADDQ $2, AX // i += 2 + LOOP l1_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE l1_end + +l1_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + PXOR X0, X0 // reset X0, X1 to break dependencies + PXOR X1, X1 + +l1_tail: + MOVSD (SI)(AX*8), X0 // X0 = t[i] + MOVSD (DI)(AX*8), X1 // x1 = s[i] + MOVAPD X0, X2 + SUBSD X1, X0 + SUBSD X2, X1 + MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + ADDSD X0, X3 // norm += X0 + +l1_end: + MOVAPS X3, X2 + SHUFPD $1, X2, X2 + ADDSD X3, X2 // X2 = X3[1] + X3[0] + MOVSD X2, ret+48(FP) // return X2 + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s new file mode 100644 index 00000000..b0625928 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s @@ -0,0 +1,57 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func LinfDist(s, t []float64) float64 +TEXT ·LinfDist(SB), NOSPLIT, $0 + MOVQ s_base+0(FP), DI // DI = &s + MOVQ t_base+24(FP), SI // SI = &t + MOVQ s_len+8(FP), CX // CX = len(s) + CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) + CMOVQLE t_len+32(FP), CX + PXOR X3, X3 // norm = 0 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE l1_end + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $1, BX // BX = CX % 2 + SHRQ $1, CX // CX = floor( CX / 2 ) + JZ l1_tail_start // if CX == 0 { return 0 } + +l1_loop: // Loop unrolled 2x do { + MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] + MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] + MOVAPS X0, X2 + SUBPD X1, X0 + SUBPD X2, X1 + MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + MAXPD X0, X3 // norm = max( norm, X0 ) + ADDQ $2, AX // i += 2 + LOOP l1_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE l1_end + +l1_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + PXOR X0, X0 // reset X0, X1 to break dependencies + PXOR X1, X1 + +l1_tail: + MOVSD (SI)(AX*8), X0 // X0 = t[i] + MOVSD (DI)(AX*8), X1 // X1 = s[i] + MOVAPD X0, X2 + SUBSD X1, X0 + SUBSD X2, X1 + MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + MAXSD X0, X3 // norm = max( norm, X0 ) + +l1_end: + MOVAPS X3, X2 + SHUFPD $1, X2, X2 + MAXSD X3, X2 // X2 = max( X3[1], X3[0] ) + MOVSD X2, ret+48(FP) // return X2 + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go new file mode 100644 index 00000000..3cc7aca6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go @@ -0,0 +1,57 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float64, x []float64) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float64, alpha float64, x []float64) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float64, x []float64, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s new file mode 100644 index 00000000..fb8b545e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalInc(alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalInc(SB), NOSPLIT, $0 + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ incX+40(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + MOVQ n+32(FP), LEN // LEN = n + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + MOVSD X4, (X_PTR)(INC_X*2) + MOVSD X5, (X_PTR)(INCx3_X*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: // do { + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (X_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s new file mode 100644 index 00000000..186fd1c0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s @@ -0,0 +1,122 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define DST_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define INC_DST R10 +#define INCx3_DST R11 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst + SHLQ $3, INC_DST // INC_DST *= sizeof(float64) + MOVSD alpha+32(FP), ALPHA // ALPHA = alpha + MOVQ x_base+40(FP), X_PTR // X_PTR = &x + MOVQ n+64(FP), LEN // LEN = n + MOVQ incX+72(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s new file mode 100644 index 00000000..f852c7f7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s @@ -0,0 +1,112 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // @ MOVDDUP XMM0, 8[RSP] + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitary(alpha float64, x []float64) +TEXT ·ScalUnitary(SB), NOSPLIT, $0 + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ x_len+16(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + XORQ IDX, IDX // IDX = 0 + + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 + +loop: // do { // x[i] *= alpha unrolled 8x. + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + MOVUPS X3, 16(X_PTR)(IDX*8) + MOVUPS X4, 32(X_PTR)(IDX*8) + MOVUPS X5, 48(X_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if n == 0 goto end + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + // x[i] *= alpha for the remaining element. + MOVSD (X_PTR)(IDX*8), X2 + MULSD ALPHA, X2 + MOVSD X2, (X_PTR)(IDX*8) + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s new file mode 100644 index 00000000..d2b607f5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x2024 // @ MOVDDUP 32(SP), X0 /*XMM0, 32[RSP]*/ + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitaryTo(dst []float64, alpha float64, x []float64) +// This function assumes len(dst) >= len(x). +TEXT ·ScalUnitaryTo(SB), NOSPLIT, $0 + MOVQ x_base+32(FP), X_PTR // X_PTR = &x + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_len+40(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + +loop: // do { // dst[i] = alpha * x[i] unrolled 8x. 
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop counters + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail_one } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go new file mode 100644 index 00000000..a51b9451 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go @@ -0,0 +1,172 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f64 + +// L1Norm is +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) + +// L1NormInc is +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) + +// AddConst is +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) + +// Add is +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// CumSum is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 + +// CumProd is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 + +// Div is +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) + +// DivTo is +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, x, y []float64) []float64 + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float64) (sum float64) + +// DotInc is +// for i := 0; i < 
int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) + +// L1Dist is +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 + +// LinfDist is +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float64, x []float64) + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float64, alpha float64, x []float64) + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float64, x []float64, n, incX uintptr) + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) + +// Sum is +// var sum float64 +// for i := range x { +// sum += x[i] +// } +func Sum(x []float64) float64 diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go new file mode 100644 index 00000000..670978aa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go @@ -0,0 +1,170 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package f64 + +import "math" + +// L1Norm is +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) { + for _, v := range x { + sum += math.Abs(v) + } + return sum +} + +// L1NormInc is +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) { + for i := 0; i < n*incX; i += incX { + sum += math.Abs(x[i]) + } + return sum +} + +// Add is +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) { + for i, v := range s { + dst[i] += v + } +} + +// AddConst is +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) { + for i := range x { + x[i] += alpha + } +} + +// CumSum is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] + v + } + return dst +} + +// CumProd is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] * v + } + return dst +} + +// Div is +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) { + for i, v := range s { + dst[i] /= v + } +} + +// DivTo is +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, s, t []float64) []float64 { + for i, v := range s { + dst[i] = v / t[i] + } + return dst +} + +// L1Dist is +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 { + var norm float64 + for i, v := range s { + norm += math.Abs(t[i] - v) + } + return norm +} + +// LinfDist is +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 { + var norm float64 + if len(s) == 0 { + return 0 + } + norm = math.Abs(t[0] - s[0]) + for i, v := range s[1:] { + absDiff := math.Abs(t[i+1] - v) + if absDiff > norm || math.IsNaN(norm) { + norm = absDiff + } + } + return norm +} + +// Sum is +// var sum float64 +// for i := range x { +// sum += x[i] +// } +func Sum(x []float64) float64 { + var sum float64 + for _, v := range x { + sum += v + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s new file mode 100644 index 00000000..22eede6e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s @@ -0,0 +1,100 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define IDX AX +#define LEN CX +#define TAIL BX +#define SUM X0 +#define SUM_1 X1 +#define SUM_2 X2 +#define SUM_3 X3 + +// func Sum(x []float64) float64 +TEXT ·Sum(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ x_len+8(FP), LEN // LEN = len(x) + XORQ IDX, IDX // i = 0 + PXOR SUM, SUM // p_sum_i = 0 + CMPQ LEN, $0 // if LEN == 0 { return 0 } + JE sum_end + + PXOR SUM_1, SUM_1 + PXOR SUM_2, SUM_2 + PXOR SUM_3, SUM_3 + + MOVQ X_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + ADDSD (X_PTR), X0 // X0 += x[0] + INCQ IDX // i++ + DECQ LEN // LEN-- + DECQ TAIL // TAIL-- + JZ sum_end // if TAIL == 0 { return } + +no_trim: + MOVQ LEN, TAIL + SHRQ $4, LEN // LEN = floor( n / 16 ) + JZ sum_tail8 // if LEN == 0 { goto sum_tail8 } + +sum_loop: // sum 16x wide do { + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(SI)(AX*8), SUM_1 + ADDPD 32(SI)(AX*8), SUM_2 + ADDPD 48(SI)(AX*8), SUM_3 + ADDPD 64(SI)(AX*8), SUM + ADDPD 80(SI)(AX*8), SUM_1 + ADDPD 96(SI)(AX*8), SUM_2 + ADDPD 112(SI)(AX*8), SUM_3 + ADDQ $16, IDX // i += 16 + DECQ LEN + JNZ sum_loop // } while --CX > 0 + +sum_tail8: + TESTQ $8, TAIL + JZ sum_tail4 + + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(SI)(AX*8), SUM_1 + ADDPD 32(SI)(AX*8), SUM_2 + ADDPD 48(SI)(AX*8), SUM_3 + ADDQ $8, IDX + +sum_tail4: + ADDPD SUM_3, SUM + ADDPD SUM_2, SUM_1 + + TESTQ $4, TAIL + JZ sum_tail2 + + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(SI)(AX*8), SUM_1 + ADDQ $4, IDX + +sum_tail2: + ADDPD SUM_1, SUM + + TESTQ $2, TAIL + JZ sum_tail1 + + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDQ $2, IDX + +sum_tail1: + HADDPD SUM, SUM // sum_i[0] += sum_i[1] + + TESTQ $1, TAIL + JZ sum_end + + ADDSD (SI)(IDX*8), SUM + +sum_end: // return sum + MOVSD SUM, sum+24(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go new file mode 100644 index 00000000..ac6eb81c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go @@ -0,0 +1,14 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// Abs returns the absolute value (also called the modulus) of x. +func Abs(x complex64) float32 { return math.Hypot(real(x), imag(x)) } diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go new file mode 100644 index 00000000..705262f2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go @@ -0,0 +1,12 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +// Conj returns the complex conjugate of x. 
+func Conj(x complex64) complex64 { return complex(real(x), -imag(x)) } diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go new file mode 100644 index 00000000..5424ea09 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go @@ -0,0 +1,7 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmplx64 provides complex64 versions of standard library math/cmplx +// package routines used by gonum/blas. +package cmplx64 // import "gonum.org/v1/gonum/internal/cmplx64" diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go new file mode 100644 index 00000000..21d3d180 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go @@ -0,0 +1,25 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// IsInf returns true if either real(x) or imag(x) is an infinity. +func IsInf(x complex64) bool { + if math.IsInf(real(x), 0) || math.IsInf(imag(x), 0) { + return true + } + return false +} + +// Inf returns a complex infinity, complex(+Inf, +Inf). +func Inf() complex64 { + inf := math.Inf(1) + return complex(inf, inf) +} diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go new file mode 100644 index 00000000..7e0bf788 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// IsNaN returns true if either real(x) or imag(x) is NaN +// and neither is an infinity. +func IsNaN(x complex64) bool { + switch { + case math.IsInf(real(x), 0) || math.IsInf(imag(x), 0): + return false + case math.IsNaN(real(x)) || math.IsNaN(imag(x)): + return true + } + return false +} + +// NaN returns a complex ``not-a-number'' value. +func NaN() complex64 { + nan := math.NaN() + return complex(nan, nan) +} diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go new file mode 100644 index 00000000..439987b4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go @@ -0,0 +1,108 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// The original C code, the long comment, and the constants +// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c. 
+// The go code is a simplified version of the original C. +// +// Cephes Math Library Release 2.8: June, 2000 +// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier +// +// The readme file at http://netlib.sandia.gov/cephes/ says: +// Some software in this archive may be from the book _Methods and +// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster +// International, 1989) or from the Cephes Mathematical Library, a +// commercial product. In either event, it is copyrighted by the author. +// What you see here may be used freely but it comes with no support or +// guarantee. +// +// The two known misprints in the book are repaired here in the +// source listings for the gamma function and the incomplete beta +// integral. +// +// Stephen L. Moshier +// moshier@na-net.ornl.gov + +// Complex square root +// +// DESCRIPTION: +// +// If z = x + iy, r = |z|, then +// +// 1/2 +// Re w = [ (r + x)/2 ] , +// +// 1/2 +// Im w = [ (r - x)/2 ] . +// +// Cancelation error in r-x or r+x is avoided by using the +// identity 2 Re w Im w = y. +// +// Note that -w is also a square root of z. The root chosen +// is always in the right half plane and Im w has the same sign as y. +// +// ACCURACY: +// +// Relative error: +// arithmetic domain # trials peak rms +// DEC -10,+10 25000 3.2e-17 9.6e-18 +// IEEE -10,+10 1,000,000 2.9e-16 6.1e-17 + +// Sqrt returns the square root of x. +// The result r is chosen so that real(r) ≥ 0 and imag(r) has the same sign as imag(x). +func Sqrt(x complex64) complex64 { + if imag(x) == 0 { + if real(x) == 0 { + return complex(0, 0) + } + if real(x) < 0 { + return complex(0, math.Sqrt(-real(x))) + } + return complex(math.Sqrt(real(x)), 0) + } + if real(x) == 0 { + if imag(x) < 0 { + r := math.Sqrt(-0.5 * imag(x)) + return complex(r, -r) + } + r := math.Sqrt(0.5 * imag(x)) + return complex(r, r) + } + a := real(x) + b := imag(x) + var scale float32 + // Rescale to avoid internal overflow or underflow. + if math.Abs(a) > 4 || math.Abs(b) > 4 { + a *= 0.25 + b *= 0.25 + scale = 2 + } else { + a *= 1.8014398509481984e16 // 2**54 + b *= 1.8014398509481984e16 + scale = 7.450580596923828125e-9 // 2**-27 + } + r := math.Hypot(a, b) + var t float32 + if a > 0 { + t = math.Sqrt(0.5*r + 0.5*a) + r = scale * math.Abs((0.5*b)/t) + t *= scale + } else { + r = math.Sqrt(0.5*r - 0.5*a) + t = scale * math.Abs((0.5*b)/r) + r *= scale + } + if b < 0 { + return complex(t, -r) + } + return complex(t, r) +} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/doc.go b/vendor/gonum.org/v1/gonum/internal/math32/doc.go new file mode 100644 index 00000000..68917c64 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/doc.go @@ -0,0 +1,7 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package math32 provides float32 versions of standard library math package +// routines used by gonum/blas/native. +package math32 // import "gonum.org/v1/gonum/internal/math32" diff --git a/vendor/gonum.org/v1/gonum/internal/math32/math.go b/vendor/gonum.org/v1/gonum/internal/math32/math.go new file mode 100644 index 00000000..56c90be0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/math.go @@ -0,0 +1,111 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math32 + +import ( + "math" +) + +const ( + unan = 0x7fc00000 + uinf = 0x7f800000 + uneginf = 0xff800000 + mask = 0x7f8 >> 3 + shift = 32 - 8 - 1 + bias = 127 +) + +// Abs returns the absolute value of x. +// +// Special cases are: +// Abs(±Inf) = +Inf +// Abs(NaN) = NaN +func Abs(x float32) float32 { + switch { + case x < 0: + return -x + case x == 0: + return 0 // return correctly abs(-0) + } + return x +} + +// Copysign returns a value with the magnitude +// of x and the sign of y. +func Copysign(x, y float32) float32 { + const sign = 1 << 31 + return math.Float32frombits(math.Float32bits(x)&^sign | math.Float32bits(y)&sign) +} + +// Hypot returns Sqrt(p*p + q*q), taking care to avoid +// unnecessary overflow and underflow. +// +// Special cases are: +// Hypot(±Inf, q) = +Inf +// Hypot(p, ±Inf) = +Inf +// Hypot(NaN, q) = NaN +// Hypot(p, NaN) = NaN +func Hypot(p, q float32) float32 { + // special cases + switch { + case IsInf(p, 0) || IsInf(q, 0): + return Inf(1) + case IsNaN(p) || IsNaN(q): + return NaN() + } + if p < 0 { + p = -p + } + if q < 0 { + q = -q + } + if p < q { + p, q = q, p + } + if p == 0 { + return 0 + } + q = q / p + return p * Sqrt(1+q*q) +} + +// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0. +func Inf(sign int) float32 { + var v uint32 + if sign >= 0 { + v = uinf + } else { + v = uneginf + } + return math.Float32frombits(v) +} + +// IsInf reports whether f is an infinity, according to sign. +// If sign > 0, IsInf reports whether f is positive infinity. +// If sign < 0, IsInf reports whether f is negative infinity. +// If sign == 0, IsInf reports whether f is either infinity. +func IsInf(f float32, sign int) bool { + // Test for infinity by comparing against maximum float. + // To avoid the floating-point hardware, could use: + // x := math.Float32bits(f); + // return sign >= 0 && x == uinf || sign <= 0 && x == uneginf; + return sign >= 0 && f > math.MaxFloat32 || sign <= 0 && f < -math.MaxFloat32 +} + +// IsNaN reports whether f is an IEEE 754 ``not-a-number'' value. +func IsNaN(f float32) (is bool) { + // IEEE 754 says that only NaNs satisfy f != f. + // To avoid the floating-point hardware, could use: + // x := math.Float32bits(f); + // return uint32(x>>shift)&mask == mask && x != uinf && x != uneginf + return f != f +} + +// NaN returns an IEEE 754 ``not-a-number'' value. +func NaN() float32 { return math.Float32frombits(unan) } diff --git a/vendor/gonum.org/v1/gonum/internal/math32/signbit.go b/vendor/gonum.org/v1/gonum/internal/math32/signbit.go new file mode 100644 index 00000000..3e9f0bb4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/signbit.go @@ -0,0 +1,16 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math32 + +import "math" + +// Signbit returns true if x is negative or negative zero. 
+func Signbit(x float32) bool { + return math.Float32bits(x)&(1<<31) != 0 +} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go b/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go new file mode 100644 index 00000000..bf630de9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go @@ -0,0 +1,25 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package math32 + +import ( + "math" +) + +// Sqrt returns the square root of x. +// +// Special cases are: +// Sqrt(+Inf) = +Inf +// Sqrt(±0) = ±0 +// Sqrt(x < 0) = NaN +// Sqrt(NaN) = NaN +func Sqrt(x float32) float32 { + // FIXME(kortschak): Direct translation of the math package + // asm code for 386 fails to build. No test hardware is available + // for arm, so using conversion instead. + return float32(math.Sqrt(float64(x))) +} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go new file mode 100644 index 00000000..905ae5c6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package math32 + +// Sqrt returns the square root of x. +// +// Special cases are: +// Sqrt(+Inf) = +Inf +// Sqrt(±0) = ±0 +// Sqrt(x < 0) = NaN +// Sqrt(NaN) = NaN +func Sqrt(x float32) float32 diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s new file mode 100644 index 00000000..fa2b8696 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +// TODO(kortschak): use textflag.h after we drop Go 1.3 support +//#include "textflag.h" +// Don't insert stack check preamble. +#define NOSPLIT 4 + +// func Sqrt(x float32) float32 +TEXT ·Sqrt(SB),NOSPLIT,$0 + SQRTSS x+0(FP), X0 + MOVSS X0, ret+8(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/lapack/.gitignore b/vendor/gonum.org/v1/gonum/lapack/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/vendor/gonum.org/v1/gonum/lapack/README.md b/vendor/gonum.org/v1/gonum/lapack/README.md new file mode 100644 index 00000000..c355017c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/README.md @@ -0,0 +1,28 @@ +Gonum LAPACK [![GoDoc](https://godoc.org/gonum.org/v1/gonum/lapack?status.svg)](https://godoc.org/gonum.org/v1/gonum/lapack) +====== + +A collection of packages to provide LAPACK functionality for the Go programming +language (http://golang.org). This provides a partial implementation in native go +and a wrapper using cgo to a c-based implementation. + +## Installation + +``` + go get gonum.org/v1/gonum/lapack/... 
+``` + +## Packages + +### lapack + +Defines the LAPACK API based on http://www.netlib.org/lapack/lapacke.html + +### lapack/gonum + +Go implementation of the LAPACK API (incomplete, implements the `float64` API). + +### lapack/lapack64 + +Wrappers for an implementation of the double (i.e., `float64`) precision real parts of +the LAPACK API. + diff --git a/vendor/gonum.org/v1/gonum/lapack/doc.go b/vendor/gonum.org/v1/gonum/lapack/doc.go new file mode 100644 index 00000000..2475cb4a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lapack provides interfaces for the LAPACK linear algebra standard. +package lapack // import "gonum.org/v1/gonum/lapack" diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go new file mode 100644 index 00000000..5f3833fd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go @@ -0,0 +1,505 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dbdsqr performs a singular value decomposition of a real n×n bidiagonal matrix. +// +// The SVD of the bidiagonal matrix B is +// B = Q * S * P^T +// where S is a diagonal matrix of singular values, Q is an orthogonal matrix of +// left singular vectors, and P is an orthogonal matrix of right singular vectors. +// +// Q and P are only computed if requested. If left singular vectors are requested, +// this routine returns U * Q instead of Q, and if right singular vectors are +// requested P^T * VT is returned instead of P^T. +// +// Frequently Dbdsqr is used in conjunction with Dgebrd which reduces a general +// matrix A into bidiagonal form. In this case, the SVD of A is +// A = (U * Q) * S * (P^T * VT) +// +// This routine may also compute Q^T * C. +// +// d and e contain the elements of the bidiagonal matrix b. d must have length at +// least n, and e must have length at least n-1. Dbdsqr will panic if there is +// insufficient length. On exit, D contains the singular values of B in decreasing +// order. +// +// VT is a matrix of size n×ncvt whose elements are stored in vt. The elements +// of vt are modified to contain P^T * VT on exit. VT is not used if ncvt == 0. +// +// U is a matrix of size nru×n whose elements are stored in u. The elements +// of u are modified to contain U * Q on exit. U is not used if nru == 0. +// +// C is a matrix of size n×ncc whose elements are stored in c. The elements +// of c are modified to contain Q^T * C on exit. C is not used if ncc == 0. +// +// work contains temporary storage and must have length at least 4*(n-1). Dbdsqr +// will panic if there is insufficient working memory. +// +// Dbdsqr returns whether the decomposition was successful. +// +// Dbdsqr is an internal routine. It is exported for testing purposes. 
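As a minimal sketch of how the arguments described above fit together (illustrative values; it uses the exported Implementation type of the lapack/gonum package and requests no singular vectors), computing the singular values of a small upper-bidiagonal matrix might look like:

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	n := 3
	// Upper bidiagonal matrix: diagonal d, superdiagonal e.
	d := []float64{3, 2, 1}
	e := []float64{0.5, 0.25}
	// ncvt = nru = ncc = 0: singular values only, so vt, u and c are unused.
	work := make([]float64, 4*n) // at least the documented 4*(n-1)
	ok := impl.Dbdsqr(blas.Upper, n, 0, 0, 0, d, e, nil, 1, nil, 1, nil, 1, work)
	fmt.Println(ok, d) // on success, d holds the singular values in decreasing order
}
```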
+func (impl Implementation) Dbdsqr(uplo blas.Uplo, n, ncvt, nru, ncc int, d, e, vt []float64, ldvt int, u []float64, ldu int, c []float64, ldc int, work []float64) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case ncvt < 0: + panic(ncvtLT0) + case nru < 0: + panic(nruLT0) + case ncc < 0: + panic(nccLT0) + case ldvt < max(1, ncvt): + panic(badLdVT) + case (ldu < max(1, n) && nru > 0) || (ldu < 1 && nru == 0): + panic(badLdU) + case ldc < max(1, ncc): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return true + } + + if len(vt) < (n-1)*ldvt+ncvt && ncvt != 0 { + panic(shortVT) + } + if len(u) < (nru-1)*ldu+n && nru != 0 { + panic(shortU) + } + if len(c) < (n-1)*ldc+ncc && ncc != 0 { + panic(shortC) + } + if len(d) < n { + panic(shortD) + } + if len(e) < n-1 { + panic(shortE) + } + if len(work) < 4*(n-1) { + panic(shortWork) + } + + var info int + bi := blas64.Implementation() + const maxIter = 6 + + if n != 1 { + // If the singular vectors do not need to be computed, use qd algorithm. + if !(ncvt > 0 || nru > 0 || ncc > 0) { + info = impl.Dlasq1(n, d, e, work) + // If info is 2 dqds didn't finish, and so try to. + if info != 2 { + return info == 0 + } + } + nm1 := n - 1 + nm12 := nm1 + nm1 + nm13 := nm12 + nm1 + idir := 0 + + eps := dlamchE + unfl := dlamchS + lower := uplo == blas.Lower + var cs, sn, r float64 + if lower { + for i := 0; i < n-1; i++ { + cs, sn, r = impl.Dlartg(d[i], e[i]) + d[i] = r + e[i] = sn * d[i+1] + d[i+1] *= cs + work[i] = cs + work[nm1+i] = sn + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, n, work, work[n-1:], u, ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, n, ncc, work, work[n-1:], c, ldc) + } + } + // Compute singular values to a relative accuracy of tol. If tol is negative + // the values will be computed to an absolute accuracy of math.Abs(tol) * norm(b) + tolmul := math.Max(10, math.Min(100, math.Pow(eps, -1.0/8))) + tol := tolmul * eps + var smax float64 + for i := 0; i < n; i++ { + smax = math.Max(smax, math.Abs(d[i])) + } + for i := 0; i < n-1; i++ { + smax = math.Max(smax, math.Abs(e[i])) + } + + var sminl float64 + var thresh float64 + if tol >= 0 { + sminoa := math.Abs(d[0]) + if sminoa != 0 { + mu := sminoa + for i := 1; i < n; i++ { + mu = math.Abs(d[i]) * (mu / (mu + math.Abs(e[i-1]))) + sminoa = math.Min(sminoa, mu) + if sminoa == 0 { + break + } + } + } + sminoa = sminoa / math.Sqrt(float64(n)) + thresh = math.Max(tol*sminoa, float64(maxIter*n*n)*unfl) + } else { + thresh = math.Max(math.Abs(tol)*smax, float64(maxIter*n*n)*unfl) + } + // Prepare for the main iteration loop for the singular values. + maxIt := maxIter * n * n + iter := 0 + oldl2 := -1 + oldm := -1 + // m points to the last element of unconverged part of matrix. + m := n + + Outer: + for m > 1 { + if iter > maxIt { + info = 0 + for i := 0; i < n-1; i++ { + if e[i] != 0 { + info++ + } + } + return info == 0 + } + // Find diagonal block of matrix to work on. 
+ if tol < 0 && math.Abs(d[m-1]) <= thresh { + d[m-1] = 0 + } + smax = math.Abs(d[m-1]) + smin := smax + var l2 int + var broke bool + for l3 := 0; l3 < m-1; l3++ { + l2 = m - l3 - 2 + abss := math.Abs(d[l2]) + abse := math.Abs(e[l2]) + if tol < 0 && abss <= thresh { + d[l2] = 0 + } + if abse <= thresh { + broke = true + break + } + smin = math.Min(smin, abss) + smax = math.Max(math.Max(smax, abss), abse) + } + if broke { + e[l2] = 0 + if l2 == m-2 { + // Convergence of bottom singular value, return to top. + m-- + continue + } + l2++ + } else { + l2 = 0 + } + // e[ll] through e[m-2] are nonzero, e[ll-1] is zero + if l2 == m-2 { + // Handle 2×2 block separately. + var sinr, cosr, sinl, cosl float64 + d[m-1], d[m-2], sinr, cosr, sinl, cosl = impl.Dlasv2(d[m-2], e[m-2], d[m-1]) + e[m-2] = 0 + if ncvt > 0 { + bi.Drot(ncvt, vt[(m-2)*ldvt:], 1, vt[(m-1)*ldvt:], 1, cosr, sinr) + } + if nru > 0 { + bi.Drot(nru, u[m-2:], ldu, u[m-1:], ldu, cosl, sinl) + } + if ncc > 0 { + bi.Drot(ncc, c[(m-2)*ldc:], 1, c[(m-1)*ldc:], 1, cosl, sinl) + } + m -= 2 + continue + } + // If working on a new submatrix, choose shift direction from larger end + // diagonal element toward smaller. + if l2 > oldm-1 || m-1 < oldl2 { + if math.Abs(d[l2]) >= math.Abs(d[m-1]) { + idir = 1 + } else { + idir = 2 + } + } + // Apply convergence tests. + // TODO(btracey): There is a lot of similar looking code here. See + // if there is a better way to de-duplicate. + if idir == 1 { + // Run convergence test in forward direction. + // First apply standard test to bottom of matrix. + if math.Abs(e[m-2]) <= math.Abs(tol)*math.Abs(d[m-1]) || (tol < 0 && math.Abs(e[m-2]) <= thresh) { + e[m-2] = 0 + continue + } + if tol >= 0 { + // If relative accuracy desired, apply convergence criterion forward. + mu := math.Abs(d[l2]) + sminl = mu + for l3 := l2; l3 < m-1; l3++ { + if math.Abs(e[l3]) <= tol*mu { + e[l3] = 0 + continue Outer + } + mu = math.Abs(d[l3+1]) * (mu / (mu + math.Abs(e[l3]))) + sminl = math.Min(sminl, mu) + } + } + } else { + // Run convergence test in backward direction. + // First apply standard test to top of matrix. + if math.Abs(e[l2]) <= math.Abs(tol)*math.Abs(d[l2]) || (tol < 0 && math.Abs(e[l2]) <= thresh) { + e[l2] = 0 + continue + } + if tol >= 0 { + // If relative accuracy desired, apply convergence criterion backward. + mu := math.Abs(d[m-1]) + sminl = mu + for l3 := m - 2; l3 >= l2; l3-- { + if math.Abs(e[l3]) <= tol*mu { + e[l3] = 0 + continue Outer + } + mu = math.Abs(d[l3]) * (mu / (mu + math.Abs(e[l3]))) + sminl = math.Min(sminl, mu) + } + } + } + oldl2 = l2 + oldm = m + // Compute shift. First, test if shifting would ruin relative accuracy, + // and if so set the shift to zero. + var shift float64 + if tol >= 0 && float64(n)*tol*(sminl/smax) <= math.Max(eps, (1.0/100)*tol) { + shift = 0 + } else { + var sl2 float64 + if idir == 1 { + sl2 = math.Abs(d[l2]) + shift, _ = impl.Dlas2(d[m-2], e[m-2], d[m-1]) + } else { + sl2 = math.Abs(d[m-1]) + shift, _ = impl.Dlas2(d[l2], e[l2], d[l2+1]) + } + // Test if shift is negligible + if sl2 > 0 { + if (shift/sl2)*(shift/sl2) < eps { + shift = 0 + } + } + } + iter += m - l2 + 1 + // If no shift, do simplified QR iteration. 
+ if shift == 0 { + if idir == 1 { + cs := 1.0 + oldcs := 1.0 + var sn, r, oldsn float64 + for i := l2; i < m-1; i++ { + cs, sn, r = impl.Dlartg(d[i]*cs, e[i]) + if i > l2 { + e[i-1] = oldsn * r + } + oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i+1]*sn) + work[i-l2] = cs + work[i-l2+nm1] = sn + work[i-l2+nm12] = oldcs + work[i-l2+nm13] = oldsn + } + h := d[m-1] * cs + d[m-1] = h * oldcs + e[m-2] = h * oldsn + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc) + } + if math.Abs(e[m-2]) < thresh { + e[m-2] = 0 + } + } else { + cs := 1.0 + oldcs := 1.0 + var sn, r, oldsn float64 + for i := m - 1; i >= l2+1; i-- { + cs, sn, r = impl.Dlartg(d[i]*cs, e[i-1]) + if i < m-1 { + e[i] = oldsn * r + } + oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i-1]*sn) + work[i-l2-1] = cs + work[i-l2+nm1-1] = -sn + work[i-l2+nm12-1] = oldcs + work[i-l2+nm13-1] = -oldsn + } + h := d[l2] * cs + d[l2] = h * oldcs + e[l2] = h * oldsn + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc) + } + if math.Abs(e[l2]) <= thresh { + e[l2] = 0 + } + } + } else { + // Use nonzero shift. + if idir == 1 { + // Chase bulge from top to bottom. Save cosines and sines for + // later singular vector updates. + f := (math.Abs(d[l2]) - shift) * (math.Copysign(1, d[l2]) + shift/d[l2]) + g := e[l2] + var cosl, sinl float64 + for i := l2; i < m-1; i++ { + cosr, sinr, r := impl.Dlartg(f, g) + if i > l2 { + e[i-1] = r + } + f = cosr*d[i] + sinr*e[i] + e[i] = cosr*e[i] - sinr*d[i] + g = sinr * d[i+1] + d[i+1] *= cosr + cosl, sinl, r = impl.Dlartg(f, g) + d[i] = r + f = cosl*e[i] + sinl*d[i+1] + d[i+1] = cosl*d[i+1] - sinl*e[i] + if i < m-2 { + g = sinl * e[i+1] + e[i+1] = cosl * e[i+1] + } + work[i-l2] = cosr + work[i-l2+nm1] = sinr + work[i-l2+nm12] = cosl + work[i-l2+nm13] = sinl + } + e[m-2] = f + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc) + } + if math.Abs(e[m-2]) <= thresh { + e[m-2] = 0 + } + } else { + // Chase bulge from top to bottom. Save cosines and sines for + // later singular vector updates. 
+ f := (math.Abs(d[m-1]) - shift) * (math.Copysign(1, d[m-1]) + shift/d[m-1]) + g := e[m-2] + for i := m - 1; i > l2; i-- { + cosr, sinr, r := impl.Dlartg(f, g) + if i < m-1 { + e[i] = r + } + f = cosr*d[i] + sinr*e[i-1] + e[i-1] = cosr*e[i-1] - sinr*d[i] + g = sinr * d[i-1] + d[i-1] *= cosr + cosl, sinl, r := impl.Dlartg(f, g) + d[i] = r + f = cosl*e[i-1] + sinl*d[i-1] + d[i-1] = cosl*d[i-1] - sinl*e[i-1] + if i > l2+1 { + g = sinl * e[i-2] + e[i-2] *= cosl + } + work[i-l2-1] = cosr + work[i-l2+nm1-1] = -sinr + work[i-l2+nm12-1] = cosl + work[i-l2+nm13-1] = -sinl + } + e[l2] = f + if math.Abs(e[l2]) <= thresh { + e[l2] = 0 + } + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc) + } + } + } + } + } + + // All singular values converged, make them positive. + for i := 0; i < n; i++ { + if d[i] < 0 { + d[i] *= -1 + if ncvt > 0 { + bi.Dscal(ncvt, -1, vt[i*ldvt:], 1) + } + } + } + + // Sort the singular values in decreasing order. + for i := 0; i < n-1; i++ { + isub := 0 + smin := d[0] + for j := 1; j < n-i; j++ { + if d[j] <= smin { + isub = j + smin = d[j] + } + } + if isub != n-i { + // Swap singular values and vectors. + d[isub] = d[n-i-1] + d[n-i-1] = smin + if ncvt > 0 { + bi.Dswap(ncvt, vt[isub*ldvt:], 1, vt[(n-i-1)*ldvt:], 1) + } + if nru > 0 { + bi.Dswap(nru, u[isub:], ldu, u[n-i-1:], ldu) + } + if ncc > 0 { + bi.Dswap(ncc, c[isub*ldc:], 1, c[(n-i-1)*ldc:], 1) + } + } + } + info = 0 + for i := 0; i < n-1; i++ { + if e[i] != 0 { + info++ + } + } + return info == 0 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go new file mode 100644 index 00000000..7caa0b17 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go @@ -0,0 +1,89 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgebak updates an n×m matrix V as +// V = P D V, if side == lapack.EVRight, +// V = P D^{-1} V, if side == lapack.EVLeft, +// where P and D are n×n permutation and scaling matrices, respectively, +// implicitly represented by job, scale, ilo and ihi as returned by Dgebal. +// +// Typically, columns of the matrix V contain the right or left (determined by +// side) eigenvectors of the balanced matrix output by Dgebal, and Dgebak forms +// the eigenvectors of the original matrix. +// +// Dgebak is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebak(job lapack.BalanceJob, side lapack.EVSide, n, ilo, ihi int, scale []float64, m int, v []float64, ldv int) { + switch { + case job != lapack.BalanceNone && job != lapack.Permute && job != lapack.Scale && job != lapack.PermuteScale: + panic(badBalanceJob) + case side != lapack.EVLeft && side != lapack.EVRight: + panic(badEVSide) + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case m < 0: + panic(mLT0) + case ldv < max(1, m): + panic(badLdV) + } + + // Quick return if possible. 
+ if n == 0 || m == 0 { + return + } + + if len(scale) < n { + panic(shortScale) + } + if len(v) < (n-1)*ldv+m { + panic(shortV) + } + + // Quick return if possible. + if job == lapack.BalanceNone { + return + } + + bi := blas64.Implementation() + if ilo != ihi && job != lapack.Permute { + // Backward balance. + if side == lapack.EVRight { + for i := ilo; i <= ihi; i++ { + bi.Dscal(m, scale[i], v[i*ldv:], 1) + } + } else { + for i := ilo; i <= ihi; i++ { + bi.Dscal(m, 1/scale[i], v[i*ldv:], 1) + } + } + } + if job == lapack.Scale { + return + } + // Backward permutation. + for i := ilo - 1; i >= 0; i-- { + k := int(scale[i]) + if k == i { + continue + } + bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) + } + for i := ihi + 1; i < n; i++ { + k := int(scale[i]) + if k == i { + continue + } + bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go new file mode 100644 index 00000000..6fb5170c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go @@ -0,0 +1,239 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgebal balances an n×n matrix A. Balancing consists of two stages, permuting +// and scaling. Both steps are optional and depend on the value of job. +// +// Permuting consists of applying a permutation matrix P such that the matrix +// that results from P^T*A*P takes the upper block triangular form +// [ T1 X Y ] +// P^T A P = [ 0 B Z ], +// [ 0 0 T2 ] +// where T1 and T2 are upper triangular matrices and B contains at least one +// nonzero off-diagonal element in each row and column. The indices ilo and ihi +// mark the starting and ending columns of the submatrix B. The eigenvalues of A +// isolated in the first 0 to ilo-1 and last ihi+1 to n-1 elements on the +// diagonal can be read off without any roundoff error. +// +// Scaling consists of applying a diagonal similarity transformation D such that +// D^{-1}*B*D has the 1-norm of each row and its corresponding column nearly +// equal. The output matrix is +// [ T1 X*D Y ] +// [ 0 inv(D)*B*D inv(D)*Z ]. +// [ 0 0 T2 ] +// Scaling may reduce the 1-norm of the matrix, and improve the accuracy of +// the computed eigenvalues and/or eigenvectors. +// +// job specifies the operations that will be performed on A. +// If job is lapack.BalanceNone, Dgebal sets scale[i] = 1 for all i and returns ilo=0, ihi=n-1. +// If job is lapack.Permute, only permuting will be done. +// If job is lapack.Scale, only scaling will be done. +// If job is lapack.PermuteScale, both permuting and scaling will be done. +// +// On return, if job is lapack.Permute or lapack.PermuteScale, it will hold that +// A[i,j] == 0, for i > j and j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}. +// If job is lapack.BalanceNone or lapack.Scale, or if n == 0, it will hold that +// ilo == 0 and ihi == n-1. +// +// On return, scale will contain information about the permutations and scaling +// factors applied to A. If π(j) denotes the index of the column interchanged +// with column j, and D[j,j] denotes the scaling factor applied to column j, +// then +// scale[j] == π(j), for j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}, +// == D[j,j], for j ∈ {ilo, ..., ihi}. +// scale must have length equal to n, otherwise Dgebal will panic. 
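A hedged usage sketch of the balancing step described above (illustrative matrix values; row-major storage with lda = n, using the exported Implementation type):

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	n := 3
	// Badly scaled 3×3 matrix, row-major with lda = n.
	a := []float64{
		1, 0, 1e6,
		0, 2, 0,
		1e-6, 0, 3,
	}
	scale := make([]float64, n) // must have length n
	ilo, ihi := impl.Dgebal(lapack.PermuteScale, n, a, n, scale)
	fmt.Println(ilo, ihi, scale) // permutation/scaling info for a later Dgebak call
}
```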
+// +// Dgebal is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebal(job lapack.BalanceJob, n int, a []float64, lda int, scale []float64) (ilo, ihi int) { + switch { + case job != lapack.BalanceNone && job != lapack.Permute && job != lapack.Scale && job != lapack.PermuteScale: + panic(badBalanceJob) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + ilo = 0 + ihi = n - 1 + + if n == 0 { + return ilo, ihi + } + + if len(scale) != n { + panic(shortScale) + } + + if job == lapack.BalanceNone { + for i := range scale { + scale[i] = 1 + } + return ilo, ihi + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + swapped := true + + if job == lapack.Scale { + goto scaling + } + + // Permutation to isolate eigenvalues if possible. + // + // Search for rows isolating an eigenvalue and push them down. + for swapped { + swapped = false + rows: + for i := ihi; i >= 0; i-- { + for j := 0; j <= ihi; j++ { + if i == j { + continue + } + if a[i*lda+j] != 0 { + continue rows + } + } + // Row i has only zero off-diagonal elements in the + // block A[ilo:ihi+1,ilo:ihi+1]. + scale[ihi] = float64(i) + if i != ihi { + bi.Dswap(ihi+1, a[i:], lda, a[ihi:], lda) + bi.Dswap(n, a[i*lda:], 1, a[ihi*lda:], 1) + } + if ihi == 0 { + scale[0] = 1 + return ilo, ihi + } + ihi-- + swapped = true + break + } + } + // Search for columns isolating an eigenvalue and push them left. + swapped = true + for swapped { + swapped = false + columns: + for j := ilo; j <= ihi; j++ { + for i := ilo; i <= ihi; i++ { + if i == j { + continue + } + if a[i*lda+j] != 0 { + continue columns + } + } + // Column j has only zero off-diagonal elements in the + // block A[ilo:ihi+1,ilo:ihi+1]. + scale[ilo] = float64(j) + if j != ilo { + bi.Dswap(ihi+1, a[j:], lda, a[ilo:], lda) + bi.Dswap(n-ilo, a[j*lda+ilo:], 1, a[ilo*lda+ilo:], 1) + } + swapped = true + ilo++ + break + } + } + +scaling: + for i := ilo; i <= ihi; i++ { + scale[i] = 1 + } + + if job == lapack.Permute { + return ilo, ihi + } + + // Balance the submatrix in rows ilo to ihi. + + const ( + // sclfac should be a power of 2 to avoid roundoff errors. + // Elements of scale are restricted to powers of sclfac, + // therefore the matrix will be only nearly balanced. + sclfac = 2 + // factor determines the minimum reduction of the row and column + // norms that is considered non-negligible. It must be less than 1. + factor = 0.95 + ) + sfmin1 := dlamchS / dlamchP + sfmax1 := 1 / sfmin1 + sfmin2 := sfmin1 * sclfac + sfmax2 := 1 / sfmin2 + + // Iterative loop for norm reduction. + var conv bool + for !conv { + conv = true + for i := ilo; i <= ihi; i++ { + c := bi.Dnrm2(ihi-ilo+1, a[ilo*lda+i:], lda) + r := bi.Dnrm2(ihi-ilo+1, a[i*lda+ilo:], 1) + ica := bi.Idamax(ihi+1, a[i:], lda) + ca := math.Abs(a[ica*lda+i]) + ira := bi.Idamax(n-ilo, a[i*lda+ilo:], 1) + ra := math.Abs(a[i*lda+ilo+ira]) + + // Guard against zero c or r due to underflow. + if c == 0 || r == 0 { + continue + } + g := r / sclfac + f := 1.0 + s := c + r + for c < g && math.Max(f, math.Max(c, ca)) < sfmax2 && math.Min(r, math.Min(g, ra)) > sfmin2 { + if math.IsNaN(c + f + ca + r + g + ra) { + // Panic if NaN to avoid infinite loop. 
+ panic("lapack: NaN") + } + f *= sclfac + c *= sclfac + ca *= sclfac + g /= sclfac + r /= sclfac + ra /= sclfac + } + g = c / sclfac + for r <= g && math.Max(r, ra) < sfmax2 && math.Min(math.Min(f, c), math.Min(g, ca)) > sfmin2 { + f /= sclfac + c /= sclfac + ca /= sclfac + g /= sclfac + r *= sclfac + ra *= sclfac + } + + if c+r >= factor*s { + // Reduction would be negligible. + continue + } + if f < 1 && scale[i] < 1 && f*scale[i] <= sfmin1 { + continue + } + if f > 1 && scale[i] > 1 && scale[i] >= sfmax1/f { + continue + } + + // Now balance. + scale[i] *= f + bi.Dscal(n-ilo, 1/f, a[i*lda+ilo:], 1) + bi.Dscal(ihi+1, f, a[i:], lda) + conv = false + } + } + return ilo, ihi +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go new file mode 100644 index 00000000..cf951a12 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go @@ -0,0 +1,86 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgebd2 reduces an m×n matrix A to upper or lower bidiagonal form by an orthogonal +// transformation. +// Q^T * A * P = B +// if m >= n, B is upper diagonal, otherwise B is lower bidiagonal. +// d is the diagonal, len = min(m,n) +// e is the off-diagonal len = min(m,n)-1 +// +// Dgebd2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebd2(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + minmn := min(m, n) + if minmn == 0 { + return + } + + switch { + case len(d) < minmn: + panic(shortD) + case len(e) < minmn-1: + panic(shortE) + case len(tauQ) < minmn: + panic(shortTauQ) + case len(tauP) < minmn: + panic(shortTauP) + case len(work) < max(m, n): + panic(shortWork) + } + + if m >= n { + for i := 0; i < n; i++ { + a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) + d[i] = a[i*lda+i] + a[i*lda+i] = 1 + // Apply H_i to A[i:m, i+1:n] from the left. 
+ if i < n-1 { + impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tauQ[i], a[i*lda+i+1:], lda, work) + } + a[i*lda+i] = d[i] + if i < n-1 { + a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) + e[i] = a[i*lda+i+1] + a[i*lda+i+1] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i-1, a[i*lda+i+1:], 1, tauP[i], a[(i+1)*lda+i+1:], lda, work) + a[i*lda+i+1] = e[i] + } else { + tauP[i] = 0 + } + } + return + } + for i := 0; i < m; i++ { + a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + d[i] = a[i*lda+i] + a[i*lda+i] = 1 + if i < m-1 { + impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tauP[i], a[(i+1)*lda+i:], lda, work) + } + a[i*lda+i] = d[i] + if i < m-1 { + a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + a[(i+1)*lda+i] = 1 + impl.Dlarf(blas.Left, m-i-1, n-i-1, a[(i+1)*lda+i:], lda, tauQ[i], a[(i+1)*lda+i+1:], lda, work) + a[(i+1)*lda+i] = e[i] + } else { + tauQ[i] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go new file mode 100644 index 00000000..f03bf8d9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go @@ -0,0 +1,161 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgebrd reduces a general m×n matrix A to upper or lower bidiagonal form B by +// an orthogonal transformation: +// Q^T * A * P = B. +// The diagonal elements of B are stored in d and the off-diagonal elements are stored +// in e. These are additionally stored along the diagonal of A and the off-diagonal +// of A. If m >= n B is an upper-bidiagonal matrix, and if m < n B is a +// lower-bidiagonal matrix. +// +// The remaining elements of A store the data needed to construct Q and P. +// The matrices Q and P are products of elementary reflectors +// if m >= n, Q = H_0 * H_1 * ... * H_{n-1}, +// P = G_0 * G_1 * ... * G_{n-2}, +// if m < n, Q = H_0 * H_1 * ... * H_{m-2}, +// P = G_0 * G_1 * ... * G_{m-1}, +// where +// H_i = I - tauQ[i] * v_i * v_i^T, +// G_i = I - tauP[i] * u_i * u_i^T. +// +// As an example, on exit the entries of A when m = 6, and n = 5 +// [ d e u1 u1 u1] +// [v1 d e u2 u2] +// [v1 v2 d e u3] +// [v1 v2 v3 d e] +// [v1 v2 v3 v4 d] +// [v1 v2 v3 v4 v5] +// and when m = 5, n = 6 +// [ d u1 u1 u1 u1 u1] +// [ e d u2 u2 u2 u2] +// [v1 e d u3 u3 u3] +// [v1 v2 e d u4 u4] +// [v1 v2 v3 e d u5] +// +// d, tauQ, and tauP must all have length at least min(m,n), and e must have +// length min(m,n) - 1, unless lwork is -1 when there is no check except for +// work which must have a length of at least one. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(1,m,n) or be -1 and this function will panic otherwise. +// Dgebrd is blocked decomposition, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgebrd, +// the optimal work length will be stored into work[0]. +// +// Dgebrd is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dgebrd(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, max(m, n)) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + minmn := min(m, n) + if minmn == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DGEBRD", " ", m, n, -1, -1) + lwkopt := (m + n) * nb + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(d) < minmn: + panic(shortD) + case len(e) < minmn-1: + panic(shortE) + case len(tauQ) < minmn: + panic(shortTauQ) + case len(tauP) < minmn: + panic(shortTauP) + } + + nx := minmn + ws := max(m, n) + if 1 < nb && nb < minmn { + // At least one blocked operation can be done. + // Get the crossover point nx. + nx = max(nb, impl.Ilaenv(3, "DGEBRD", " ", m, n, -1, -1)) + // Determine when to switch from blocked to unblocked code. + if nx < minmn { + // At least one blocked operation will be done. + ws = (m + n) * nb + if lwork < ws { + // Not enough work space for the optimal nb, + // consider using a smaller block size. + nbmin := impl.Ilaenv(2, "DGEBRD", " ", m, n, -1, -1) + if lwork >= (m+n)*nbmin { + // Enough work space for minimum block size. + nb = lwork / (m + n) + } else { + nb = minmn + nx = minmn + } + } + } + } + bi := blas64.Implementation() + ldworkx := nb + ldworky := nb + var i int + for i = 0; i < minmn-nx; i += nb { + // Reduce rows and columns i:i+nb to bidiagonal form and return + // the matrices X and Y which are needed to update the unreduced + // part of the matrix. + // X is stored in the first m rows of work, y in the next rows. + x := work[:m*ldworkx] + y := work[m*ldworkx:] + impl.Dlabrd(m-i, n-i, nb, a[i*lda+i:], lda, + d[i:], e[i:], tauQ[i:], tauP[i:], + x, ldworkx, y, ldworky) + + // Update the trailing submatrix A[i+nb:m,i+nb:n], using an update + // of the form A := A - V*Y**T - X*U**T + bi.Dgemm(blas.NoTrans, blas.Trans, m-i-nb, n-i-nb, nb, + -1, a[(i+nb)*lda+i:], lda, y[nb*ldworky:], ldworky, + 1, a[(i+nb)*lda+i+nb:], lda) + + bi.Dgemm(blas.NoTrans, blas.NoTrans, m-i-nb, n-i-nb, nb, + -1, x[nb*ldworkx:], ldworkx, a[i*lda+i+nb:], lda, + 1, a[(i+nb)*lda+i+nb:], lda) + + // Copy diagonal and off-diagonal elements of B back into A. + if m >= n { + for j := i; j < i+nb; j++ { + a[j*lda+j] = d[j] + a[j*lda+j+1] = e[j] + } + } else { + for j := i; j < i+nb; j++ { + a[j*lda+j] = d[j] + a[(j+1)*lda+j] = e[j] + } + } + } + // Use unblocked code to reduce the remainder of the matrix. + impl.Dgebd2(m-i, n-i, a[i*lda+i:], lda, d[i:], e[i:], tauQ[i:], tauP[i:], work) + work[0] = float64(ws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go new file mode 100644 index 00000000..1d1ca586 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go @@ -0,0 +1,92 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgecon estimates the reciprocal of the condition number of the n×n matrix A +// given the LU decomposition of the matrix. The condition number computed may +// be based on the 1-norm or the ∞-norm. 
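The Dgebrd routine above follows the usual LAPACK workspace-query convention: calling it with lwork == -1 stores the optimal workspace length in work[0] without performing the reduction. A sketch of the two-call pattern, with illustrative sizes and a zero matrix just to show the plumbing:

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	m, n := 6, 5 // illustrative sizes, m >= n so min(m, n) = n
	lda := n
	a := make([]float64, m*lda) // row-major general matrix (left zero here)
	d := make([]float64, n)
	e := make([]float64, n-1)
	tauQ := make([]float64, n)
	tauP := make([]float64, n)

	// Workspace query: lwork == -1 only writes the optimal length into work[0].
	query := []float64{0}
	impl.Dgebrd(m, n, a, lda, d, e, tauQ, tauP, query, -1)

	lwork := int(query[0])
	work := make([]float64, lwork)
	impl.Dgebrd(m, n, a, lda, d, e, tauQ, tauP, work, lwork)
	fmt.Println(lwork, d) // d and e now hold the bidiagonal elements of B
}
```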
+// +// The slice a contains the result of the LU decomposition of A as computed by Dgetrf. +// +// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 4*n and Dgecon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dgecon will panic otherwise. +func (impl Implementation) Dgecon(norm lapack.MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { + switch { + case norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum: + panic(badNorm) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return 1 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(work) < 4*n: + panic(shortWork) + case len(iwork) < n: + panic(shortIWork) + } + + // Quick return if possible. + if anorm == 0 { + return 0 + } + + bi := blas64.Implementation() + var rcond, ainvnm float64 + var kase int + var normin bool + isave := new([3]int) + onenrm := norm == lapack.MaxColumnSum + smlnum := dlamchS + kase1 := 2 + if onenrm { + kase1 = 1 + } + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / ainvnm) / anorm + } + return rcond + } + var sl, su float64 + if kase == kase1 { + sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.Unit, normin, n, a, lda, work, work[2*n:]) + su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) + } else { + su = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) + sl = impl.Dlatrs(blas.Lower, blas.Trans, blas.Unit, normin, n, a, lda, work, work[2*n:]) + } + scale := sl * su + normin = true + if scale != 1 { + ix := bi.Idamax(n, work, 1) + if scale == 0 || scale < math.Abs(work[ix])*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go new file mode 100644 index 00000000..0da4e609 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go @@ -0,0 +1,279 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgeev computes the eigenvalues and, optionally, the left and/or right +// eigenvectors for an n×n real nonsymmetric matrix A. +// +// The right eigenvector v_j of A corresponding to an eigenvalue λ_j +// is defined by +// A v_j = λ_j v_j, +// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by +// u_j^H A = λ_j u_j^H, +// where u_j^H is the conjugate transpose of u_j. +// +// On return, A will be overwritten and the left and right eigenvectors will be +// stored, respectively, in the columns of the n×n matrices VL and VR in the +// same order as their eigenvalues. If the j-th eigenvalue is real, then +// u_j = VL[:,j], +// v_j = VR[:,j], +// and if it is not real, then j and j+1 form a complex conjugate pair and the +// eigenvectors can be recovered as +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// where i is the imaginary unit. 
The computed eigenvectors are normalized to +// have Euclidean norm equal to 1 and largest component real. +// +// Left eigenvectors will be computed only if jobvl == lapack.LeftEVCompute, +// otherwise jobvl must be lapack.LeftEVNone. +// Right eigenvectors will be computed only if jobvr == lapack.RightEVCompute, +// otherwise jobvr must be lapack.RightEVNone. +// For other values of jobvl and jobvr Dgeev will panic. +// +// wr and wi contain the real and imaginary parts, respectively, of the computed +// eigenvalues. Complex conjugate pairs of eigenvalues appear consecutively with +// the eigenvalue having the positive imaginary part first. +// wr and wi must have length n, and Dgeev will panic otherwise. +// +// work must have length at least lwork and lwork must be at least max(1,4*n) if +// the left or right eigenvectors are computed, and at least max(1,3*n) if no +// eigenvectors are computed. For good performance, lwork must generally be +// larger. On return, optimal value of lwork will be stored in work[0]. +// +// If lwork == -1, instead of performing Dgeev, the function only calculates the +// optimal vaule of lwork and stores it into work[0]. +// +// On return, first is the index of the first valid eigenvalue. If first == 0, +// all eigenvalues and eigenvectors have been computed. If first is positive, +// Dgeev failed to compute all the eigenvalues, no eigenvectors have been +// computed and wr[first:] and wi[first:] contain those eigenvalues which have +// converged. +func (impl Implementation) Dgeev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int) { + wantvl := jobvl == lapack.LeftEVCompute + wantvr := jobvr == lapack.RightEVCompute + var minwrk int + if wantvl || wantvr { + minwrk = max(1, 4*n) + } else { + minwrk = max(1, 3*n) + } + switch { + case jobvl != lapack.LeftEVCompute && jobvl != lapack.LeftEVNone: + panic(badLeftEVJob) + case jobvr != lapack.RightEVCompute && jobvr != lapack.RightEVNone: + panic(badRightEVJob) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldvl < 1 || (ldvl < n && wantvl): + panic(badLdVL) + case ldvr < 1 || (ldvr < n && wantvr): + panic(badLdVR) + case lwork < minwrk && lwork != -1: + panic(badLWork) + case len(work) < lwork: + panic(shortWork) + } + + // Quick return if possible. 
+ if n == 0 { + work[0] = 1 + return 0 + } + + maxwrk := 2*n + n*impl.Ilaenv(1, "DGEHRD", " ", n, 1, n, 0) + if wantvl || wantvr { + maxwrk = max(maxwrk, 2*n+(n-1)*impl.Ilaenv(1, "DORGHR", " ", n, 1, n, -1)) + impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, 0, n-1, + a, lda, wr, wi, nil, n, work, -1) + maxwrk = max(maxwrk, max(n+1, n+int(work[0]))) + side := lapack.EVLeft + if wantvr { + side = lapack.EVRight + } + impl.Dtrevc3(side, lapack.EVAllMulQ, nil, n, a, lda, vl, ldvl, vr, ldvr, + n, work, -1) + maxwrk = max(maxwrk, n+int(work[0])) + maxwrk = max(maxwrk, 4*n) + } else { + impl.Dhseqr(lapack.EigenvaluesOnly, lapack.SchurNone, n, 0, n-1, + a, lda, wr, wi, vr, ldvr, work, -1) + maxwrk = max(maxwrk, max(n+1, n+int(work[0]))) + } + maxwrk = max(maxwrk, minwrk) + + if lwork == -1 { + work[0] = float64(maxwrk) + return 0 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(wr) != n: + panic(badLenWr) + case len(wi) != n: + panic(badLenWi) + case len(vl) < (n-1)*ldvl+n && wantvl: + panic(shortVL) + case len(vr) < (n-1)*ldvr+n && wantvr: + panic(shortVR) + } + + // Get machine constants. + smlnum := math.Sqrt(dlamchS) / dlamchP + bignum := 1 / smlnum + + // Scale A if max element outside range [smlnum,bignum]. + anrm := impl.Dlange(lapack.MaxAbs, n, n, a, lda, nil) + var scalea bool + var cscale float64 + if 0 < anrm && anrm < smlnum { + scalea = true + cscale = smlnum + } else if anrm > bignum { + scalea = true + cscale = bignum + } + if scalea { + impl.Dlascl(lapack.General, 0, 0, anrm, cscale, n, n, a, lda) + } + + // Balance the matrix. + workbal := work[:n] + ilo, ihi := impl.Dgebal(lapack.PermuteScale, n, a, lda, workbal) + + // Reduce to upper Hessenberg form. + iwrk := 2 * n + tau := work[n : iwrk-1] + impl.Dgehrd(n, ilo, ihi, a, lda, tau, work[iwrk:], lwork-iwrk) + + var side lapack.EVSide + if wantvl { + side = lapack.EVLeft + // Copy Householder vectors to VL. + impl.Dlacpy(blas.Lower, n, n, a, lda, vl, ldvl) + // Generate orthogonal matrix in VL. + impl.Dorghr(n, ilo, ihi, vl, ldvl, tau, work[iwrk:], lwork-iwrk) + // Perform QR iteration, accumulating Schur vectors in VL. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, ilo, ihi, + a, lda, wr, wi, vl, ldvl, work[iwrk:], lwork-iwrk) + if wantvr { + // Want left and right eigenvectors. + // Copy Schur vectors to VR. + side = lapack.EVBoth + impl.Dlacpy(blas.All, n, n, vl, ldvl, vr, ldvr) + } + } else if wantvr { + side = lapack.EVRight + // Copy Householder vectors to VR. + impl.Dlacpy(blas.Lower, n, n, a, lda, vr, ldvr) + // Generate orthogonal matrix in VR. + impl.Dorghr(n, ilo, ihi, vr, ldvr, tau, work[iwrk:], lwork-iwrk) + // Perform QR iteration, accumulating Schur vectors in VR. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, ilo, ihi, + a, lda, wr, wi, vr, ldvr, work[iwrk:], lwork-iwrk) + } else { + // Compute eigenvalues only. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesOnly, lapack.SchurNone, n, ilo, ihi, + a, lda, wr, wi, nil, 1, work[iwrk:], lwork-iwrk) + } + + if first > 0 { + if scalea { + // Undo scaling. + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wr, 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wi, 1) + } + work[0] = float64(maxwrk) + return first + } + + if wantvl || wantvr { + // Compute left and/or right eigenvectors. 
+ impl.Dtrevc3(side, lapack.EVAllMulQ, nil, n, + a, lda, vl, ldvl, vr, ldvr, n, work[iwrk:], lwork-iwrk) + } + bi := blas64.Implementation() + if wantvl { + // Undo balancing of left eigenvectors. + impl.Dgebak(lapack.PermuteScale, lapack.EVLeft, n, ilo, ihi, workbal, n, vl, ldvl) + // Normalize left eigenvectors and make largest component real. + for i, wii := range wi { + if wii < 0 { + continue + } + if wii == 0 { + scl := 1 / bi.Dnrm2(n, vl[i:], ldvl) + bi.Dscal(n, scl, vl[i:], ldvl) + continue + } + scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vl[i:], ldvl), bi.Dnrm2(n, vl[i+1:], ldvl)) + bi.Dscal(n, scl, vl[i:], ldvl) + bi.Dscal(n, scl, vl[i+1:], ldvl) + for k := 0; k < n; k++ { + vi := vl[k*ldvl+i] + vi1 := vl[k*ldvl+i+1] + work[iwrk+k] = vi*vi + vi1*vi1 + } + k := bi.Idamax(n, work[iwrk:iwrk+n], 1) + cs, sn, _ := impl.Dlartg(vl[k*ldvl+i], vl[k*ldvl+i+1]) + bi.Drot(n, vl[i:], ldvl, vl[i+1:], ldvl, cs, sn) + vl[k*ldvl+i+1] = 0 + } + } + if wantvr { + // Undo balancing of right eigenvectors. + impl.Dgebak(lapack.PermuteScale, lapack.EVRight, n, ilo, ihi, workbal, n, vr, ldvr) + // Normalize right eigenvectors and make largest component real. + for i, wii := range wi { + if wii < 0 { + continue + } + if wii == 0 { + scl := 1 / bi.Dnrm2(n, vr[i:], ldvr) + bi.Dscal(n, scl, vr[i:], ldvr) + continue + } + scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vr[i:], ldvr), bi.Dnrm2(n, vr[i+1:], ldvr)) + bi.Dscal(n, scl, vr[i:], ldvr) + bi.Dscal(n, scl, vr[i+1:], ldvr) + for k := 0; k < n; k++ { + vi := vr[k*ldvr+i] + vi1 := vr[k*ldvr+i+1] + work[iwrk+k] = vi*vi + vi1*vi1 + } + k := bi.Idamax(n, work[iwrk:iwrk+n], 1) + cs, sn, _ := impl.Dlartg(vr[k*ldvr+i], vr[k*ldvr+i+1]) + bi.Drot(n, vr[i:], ldvr, vr[i+1:], ldvr, cs, sn) + vr[k*ldvr+i+1] = 0 + } + } + + if scalea { + // Undo scaling. + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) + } + + work[0] = float64(maxwrk) + return first +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go new file mode 100644 index 00000000..261f21b9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go @@ -0,0 +1,97 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgehd2 reduces a block of a general n×n matrix A to upper Hessenberg form H +// by an orthogonal similarity transformation Q^T * A * Q = H. +// +// The matrix Q is represented as a product of (ihi-ilo) elementary +// reflectors +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T +// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. +// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. +// +// On entry, a contains the n×n general matrix to be reduced. On return, the +// upper triangle and the first subdiagonal of A are overwritten with the upper +// Hessenberg matrix H, and the elements below the first subdiagonal, with the +// slice tau, represent the orthogonal matrix Q as a product of elementary +// reflectors. +// +// The contents of A are illustrated by the following example, with n = 7, ilo = +// 1 and ihi = 5. 
+// On entry, +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// on return, +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// ilo and ihi determine the block of A that will be reduced to upper Hessenberg +// form. It must hold that 0 <= ilo <= ihi <= max(0, n-1), otherwise Dgehd2 will +// panic. +// +// On return, tau will contain the scalar factors of the elementary reflectors. +// It must have length equal to n-1, otherwise Dgehd2 will panic. +// +// work must have length at least n, otherwise Dgehd2 will panic. +// +// Dgehd2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgehd2(n, ilo, ihi int, a []float64, lda int, tau, work []float64) { + switch { + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(tau) != n-1: + panic(badLenTau) + case len(work) < n: + panic(shortWork) + } + + for i := ilo; i < ihi; i++ { + // Compute elementary reflector H_i to annihilate A[i+2:ihi+1,i]. + var aii float64 + aii, tau[i] = impl.Dlarfg(ihi-i, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + a[(i+1)*lda+i] = 1 + + // Apply H_i to A[0:ihi+1,i+1:ihi+1] from the right. + impl.Dlarf(blas.Right, ihi+1, ihi-i, a[(i+1)*lda+i:], lda, tau[i], a[i+1:], lda, work) + + // Apply H_i to A[i+1:ihi+1,i+1:n] from the left. + impl.Dlarf(blas.Left, ihi-i, n-i-1, a[(i+1)*lda+i:], lda, tau[i], a[(i+1)*lda+i+1:], lda, work) + a[(i+1)*lda+i] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go new file mode 100644 index 00000000..89b73cef --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go @@ -0,0 +1,194 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgehrd reduces a block of a real n×n general matrix A to upper Hessenberg +// form H by an orthogonal similarity transformation Q^T * A * Q = H. +// +// The matrix Q is represented as a product of (ihi-ilo) elementary +// reflectors +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T +// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. +// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. +// +// On entry, a contains the n×n general matrix to be reduced. On return, the +// upper triangle and the first subdiagonal of A will be overwritten with the +// upper Hessenberg matrix H, and the elements below the first subdiagonal, with +// the slice tau, represent the orthogonal matrix Q as a product of elementary +// reflectors. +// +// The contents of a are illustrated by the following example, with n = 7, ilo = +// 1 and ihi = 5. 
+// On entry, +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// on return, +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// ilo and ihi determine the block of A that will be reduced to upper Hessenberg +// form. It must hold that 0 <= ilo <= ihi < n if n > 0, and ilo == 0 and ihi == +// -1 if n == 0, otherwise Dgehrd will panic. +// +// On return, tau will contain the scalar factors of the elementary reflectors. +// Elements tau[:ilo] and tau[ihi:] will be set to zero. tau must have length +// equal to n-1 if n > 0, otherwise Dgehrd will panic. +// +// work must have length at least lwork and lwork must be at least max(1,n), +// otherwise Dgehrd will panic. On return, work[0] contains the optimal value of +// lwork. +// +// If lwork == -1, instead of performing Dgehrd, only the optimal value of lwork +// will be stored in work[0]. +// +// Dgehrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgehrd(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < lwork: + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + 1 + tsize = ldt * nbmax + ) + // Compute the workspace requirements. + nb := min(nbmax, impl.Ilaenv(1, "DGEHRD", " ", n, ilo, ihi, -1)) + lwkopt := n*nb + tsize + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + if len(tau) != n-1 { + panic(badLenTau) + } + + // Set tau[:ilo] and tau[ihi:] to zero. + for i := 0; i < ilo; i++ { + tau[i] = 0 + } + for i := ihi; i < n-1; i++ { + tau[i] = 0 + } + + // Quick return if possible. + nh := ihi - ilo + 1 + if nh <= 1 { + work[0] = 1 + return + } + + // Determine the block size. + nbmin := 2 + var nx int + if 1 < nb && nb < nh { + // Determine when to cross over from blocked to unblocked code + // (last block is always handled by unblocked code). + nx = max(nb, impl.Ilaenv(3, "DGEHRD", " ", n, ilo, ihi, -1)) + if nx < nh { + // Determine if workspace is large enough for blocked code. + if lwork < n*nb+tsize { + // Not enough workspace to use optimal nb: + // determine the minimum value of nb, and reduce + // nb or force use of unblocked code. + nbmin = max(2, impl.Ilaenv(2, "DGEHRD", " ", n, ilo, ihi, -1)) + if lwork >= n*nbmin+tsize { + nb = (lwork - tsize) / n + } else { + nb = 1 + } + } + } + } + ldwork := nb // work is used as an n×nb matrix. + + var i int + if nb < nbmin || nh <= nb { + // Use unblocked code below. + i = ilo + } else { + // Use blocked code. + bi := blas64.Implementation() + iwt := n * nb // Size of the matrix Y and index where the matrix T starts in work. 
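A minimal sketch of calling Dgehrd as documented above, assuming the vendored gonum.org/v1/gonum/lapack/gonum package; the 4x4 matrix and the names are illustrative. It performs the lwork == -1 workspace query and then reduces the whole matrix (ilo = 0, ihi = n-1) to upper Hessenberg form.

package main

import (
	"fmt"

	lapackimpl "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackimpl.Implementation{}

	n := 4
	// Row-major 4x4 general matrix (illustrative data).
	a := []float64{
		4, 1, 2, 3,
		1, 3, 0, 2,
		2, 0, 5, 1,
		3, 2, 1, 6,
	}
	ilo, ihi := 0, n-1          // reduce the whole matrix
	tau := make([]float64, n-1) // scalar factors of the reflectors

	// Workspace query, then the reduction itself.
	work := make([]float64, 1)
	impl.Dgehrd(n, ilo, ihi, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgehrd(n, ilo, ihi, a, n, tau, work, len(work))

	// The upper triangle and first subdiagonal of a now hold H; the
	// entries below the first subdiagonal, together with tau, encode Q.
	fmt.Println("H and reflector data:", a)
}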
+ for i = ilo; i < ihi-nx; i += nb { + ib := min(nb, ihi-i) + + // Reduce columns [i:i+ib] to Hessenberg form, returning the + // matrices V and T of the block reflector H = I - V*T*V^T + // which performs the reduction, and also the matrix Y = A*V*T. + impl.Dlahr2(ihi+1, i+1, ib, a[i:], lda, tau[i:], work[iwt:], ldt, work, ldwork) + + // Apply the block reflector H to A[:ihi+1,i+ib:ihi+1] from the + // right, computing A := A - Y * V^T. V[i+ib,i+ib-1] must be set + // to 1. + ei := a[(i+ib)*lda+i+ib-1] + a[(i+ib)*lda+i+ib-1] = 1 + bi.Dgemm(blas.NoTrans, blas.Trans, ihi+1, ihi-i-ib+1, ib, + -1, work, ldwork, + a[(i+ib)*lda+i:], lda, + 1, a[i+ib:], lda) + a[(i+ib)*lda+i+ib-1] = ei + + // Apply the block reflector H to A[0:i+1,i+1:i+ib-1] from the + // right. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, i+1, ib-1, + 1, a[(i+1)*lda+i:], lda, work, ldwork) + for j := 0; j <= ib-2; j++ { + bi.Daxpy(i+1, -1, work[j:], ldwork, a[i+j+1:], lda) + } + + // Apply the block reflector H to A[i+1:ihi+1,i+ib:n] from the + // left. + impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, + ihi-i, n-i-ib, ib, + a[(i+1)*lda+i:], lda, work[iwt:], ldt, a[(i+1)*lda+i+ib:], lda, work, ldwork) + } + } + // Use unblocked code to reduce the rest of the matrix. + impl.Dgehd2(n, i, ihi, a, lda, tau, work) + work[0] = float64(lwkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go new file mode 100644 index 00000000..abc96f7d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go @@ -0,0 +1,65 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgelq2 computes the LQ factorization of the m×n matrix A. +// +// In an LQ factorization, L is a lower triangular m×n matrix, and Q is an n×n +// orthonormal matrix. +// +// a is modified to contain the information to construct L and Q. +// The lower triangle of a contains the matrix L. The upper triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length of at least k = min(m,n) +// and this function will panic otherwise. +// +// See Dgeqr2 for a description of the elementary reflectors and orthonormal +// matrix Q. Q is constructed as a product of these elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0. +// +// work is temporary storage of length at least m and this function will panic otherwise. +// +// Dgelq2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgelq2(m, n int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
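A minimal sketch of the unblocked Dgelq2 documented above, assuming the vendored gonum.org/v1/gonum/lapack/gonum package; the 2x4 matrix and the names are illustrative. tau has length min(m,n) and work has length m, matching the panic conditions listed in the comment.

package main

import (
	"fmt"

	lapackimpl "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackimpl.Implementation{}

	m, n := 2, 4
	// Row-major 2x4 matrix (illustrative data).
	a := []float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
	}
	tau := make([]float64, 2)  // min(m, n)
	work := make([]float64, m) // Dgelq2 needs work of length at least m

	impl.Dgelq2(m, n, a, n, tau, work)

	// The lower triangle of a now holds L; the strictly upper triangular
	// entries and tau encode the elementary reflectors that define Q.
	fmt.Println("L and reflector data:", a)
	fmt.Println("tau:", tau)
}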
+ k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < m: + panic(shortWork) + } + + for i := 0; i < k; i++ { + a[i*lda+i], tau[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + if i < m-1 { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i, + a[i*lda+i:], 1, + tau[i], + a[(i+1)*lda+i:], lda, + work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go new file mode 100644 index 00000000..f1fd13a0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go @@ -0,0 +1,97 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgelqf computes the LQ factorization of the m×n matrix A using a blocked +// algorithm. See the documentation for Dgelq2 for a description of the +// parameters at entry and exit. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m, and this function will panic otherwise. +// Dgelqf is a blocked LQ factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgelqf, +// the optimal work length will be stored into work[0]. +// +// tau must have length at least min(m,n), and this function will panic otherwise. +func (impl Implementation) Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, m) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + k := min(m, n) + if k == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(m * nb) + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(tau) < k { + panic(shortTau) + } + + // Find the optimal blocking size based on the size of available memory + // and optimal machine parameters. + nbmin := 2 + var nx int + iws := m + if 1 < nb && nb < k { + nx = max(0, impl.Ilaenv(3, "DGELQF", " ", m, n, -1, -1)) + if nx < k { + iws = m * nb + if lwork < iws { + nb = lwork / m + nbmin = max(2, impl.Ilaenv(2, "DGELQF", " ", m, n, -1, -1)) + } + } + } + ldwork := nb + // Computed blocked LQ factorization. + var i int + if nbmin <= nb && nb < k && nx < k { + for i = 0; i < k-nx; i += nb { + ib := min(k-i, nb) + impl.Dgelq2(ib, n-i, a[i*lda+i:], lda, tau[i:], work) + if i+ib < m { + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Forward, lapack.RowWise, + m-i-ib, n-i, ib, + a[i*lda+i:], lda, + work, ldwork, + a[(i+ib)*lda+i:], lda, + work[ib*ldwork:], ldwork) + } + } + } + // Perform unblocked LQ factorization on the remainder. + if i < k { + impl.Dgelq2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go new file mode 100644 index 00000000..a3894b6a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go @@ -0,0 +1,219 @@ +// Copyright ©2015 The Gonum Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgels finds a minimum-norm solution based on the matrices A and B using the +// QR or LQ factorization. Dgels returns false if the matrix +// A is singular, and true if this solution was successfully found. +// +// The minimization problem solved depends on the input parameters. +// +// 1. If m >= n and trans == blas.NoTrans, Dgels finds X such that || A*X - B||_2 +// is minimized. +// 2. If m < n and trans == blas.NoTrans, Dgels finds the minimum norm solution of +// A * X = B. +// 3. If m >= n and trans == blas.Trans, Dgels finds the minimum norm solution of +// A^T * X = B. +// 4. If m < n and trans == blas.Trans, Dgels finds X such that || A*X - B||_2 +// is minimized. +// Note that the least-squares solutions (cases 1 and 3) perform the minimization +// per column of B. This is not the same as finding the minimum-norm matrix. +// +// The matrix A is a general matrix of size m×n and is modified during this call. +// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry, +// the elements of b specify the input matrix B. B has size m×nrhs if +// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the +// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, +// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic +// otherwise. A longer work will enable blocked algorithms to be called. +// In the special case that lwork == -1, work[0] will be set to the optimal working +// length. +func (impl Implementation) Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool { + mn := min(m, n) + minwrk := mn + max(mn, nrhs) + switch { + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + case lwork < max(1, minwrk) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if mn == 0 || nrhs == 0 { + impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) + work[0] = 1 + return true + } + + // Find optimal block size. + var nb int + if m >= n { + nb = impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) + if trans != blas.NoTrans { + nb = max(nb, impl.Ilaenv(1, "DORMQR", "LN", m, nrhs, n, -1)) + } else { + nb = max(nb, impl.Ilaenv(1, "DORMQR", "LT", m, nrhs, n, -1)) + } + } else { + nb = impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) + if trans != blas.NoTrans { + nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LT", n, nrhs, m, -1)) + } else { + nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LN", n, nrhs, m, -1)) + } + } + wsize := max(1, mn+max(mn, nrhs)*nb) + work[0] = float64(wsize) + + if lwork == -1 { + return true + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(b) < (max(m, n)-1)*ldb+nrhs: + panic(shortB) + } + + // Scale the input matrices if they contain extreme values. 
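A minimal sketch of case 1 above (m >= n and trans == blas.NoTrans), assuming the vendored gonum packages; the 3x2 system and the names are illustrative. Dgels overwrites the first n rows of b with the least-squares solution.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	lapackimpl "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackimpl.Implementation{}

	// Minimize ||A*x - b||_2 for an overdetermined 3x2 system (illustrative data).
	m, n, nrhs := 3, 2, 1
	a := []float64{
		1, 0,
		0, 1,
		1, 1,
	}
	// b has max(m, n) = 3 rows and nrhs = 1 column.
	b := []float64{
		1,
		2,
		4,
	}

	// Workspace query, then the solve.
	work := make([]float64, 1)
	impl.Dgels(blas.NoTrans, m, n, nrhs, a, n, b, nrhs, work, -1)
	work = make([]float64, int(work[0]))
	ok := impl.Dgels(blas.NoTrans, m, n, nrhs, a, n, b, nrhs, work, len(work))
	if !ok {
		fmt.Println("A is not of full rank")
		return
	}
	// The first n rows of b now hold the solution, here x ~ (1.333, 2.333).
	fmt.Println("x =", b[:n])
}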
+ smlnum := dlamchS / dlamchP + bignum := 1 / smlnum + anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil) + var iascl int + if anrm > 0 && anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda) + iascl = 1 + } else if anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda) + } else if anrm == 0 { + // Matrix is all zeros. + impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) + return true + } + brow := m + if trans != blas.NoTrans { + brow = n + } + bnrm := impl.Dlange(lapack.MaxAbs, brow, nrhs, b, ldb, nil) + ibscl := 0 + if bnrm > 0 && bnrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, bnrm, smlnum, brow, nrhs, b, ldb) + ibscl = 1 + } else if bnrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bnrm, bignum, brow, nrhs, b, ldb) + ibscl = 2 + } + + // Solve the minimization problem using a QR or an LQ decomposition. + var scllen int + if m >= n { + impl.Dgeqrf(m, n, a, lda, work, work[mn:], lwork-mn) + if trans == blas.NoTrans { + impl.Dormqr(blas.Left, blas.Trans, m, nrhs, n, + a, lda, + work[:n], + b, ldb, + work[mn:], lwork-mn) + ok := impl.Dtrtrs(blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + scllen = n + } else { + ok := impl.Dtrtrs(blas.Upper, blas.Trans, blas.NonUnit, n, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + for i := n; i < m; i++ { + for j := 0; j < nrhs; j++ { + b[i*ldb+j] = 0 + } + } + impl.Dormqr(blas.Left, blas.NoTrans, m, nrhs, n, + a, lda, + work[:n], + b, ldb, + work[mn:], lwork-mn) + scllen = m + } + } else { + impl.Dgelqf(m, n, a, lda, work, work[mn:], lwork-mn) + if trans == blas.NoTrans { + ok := impl.Dtrtrs(blas.Lower, blas.NoTrans, blas.NonUnit, + m, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + for i := m; i < n; i++ { + for j := 0; j < nrhs; j++ { + b[i*ldb+j] = 0 + } + } + impl.Dormlq(blas.Left, blas.Trans, n, nrhs, m, + a, lda, + work, + b, ldb, + work[mn:], lwork-mn) + scllen = n + } else { + impl.Dormlq(blas.Left, blas.NoTrans, n, nrhs, m, + a, lda, + work, + b, ldb, + work[mn:], lwork-mn) + ok := impl.Dtrtrs(blas.Lower, blas.Trans, blas.NonUnit, + m, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + } + } + + // Adjust answer vector based on scaling. + if iascl == 1 { + impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, scllen, nrhs, b, ldb) + } + if iascl == 2 { + impl.Dlascl(lapack.General, 0, 0, anrm, bignum, scllen, nrhs, b, ldb) + } + if ibscl == 1 { + impl.Dlascl(lapack.General, 0, 0, smlnum, bnrm, scllen, nrhs, b, ldb) + } + if ibscl == 2 { + impl.Dlascl(lapack.General, 0, 0, bignum, bnrm, scllen, nrhs, b, ldb) + } + + work[0] = float64(wsize) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go new file mode 100644 index 00000000..3f3ddb16 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go @@ -0,0 +1,61 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgeql2 computes the QL factorization of the m×n matrix A. That is, Dgeql2 +// computes Q and L such that +// A = Q * L +// where Q is an m×m orthonormal matrix and L is a lower trapezoidal matrix. +// +// Q is represented as a product of elementary reflectors, +// Q = H_{k-1} * ... 
* H_1 * H_0 +// where k = min(m,n) and each H_i has the form +// H_i = I - tau[i] * v_i * v_i^T +// Vector v_i has v[m-k+i+1:m] = 0, v[m-k+i] = 1, and v[:m-k+i+1] is stored on +// exit in A[0:m-k+i-1, n-k+i]. +// +// tau must have length at least min(m,n), and Dgeql2 will panic otherwise. +// +// work is temporary memory storage and must have length at least n. +// +// Dgeql2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeql2(m, n int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < n: + panic(shortWork) + } + + var aii float64 + for i := k - 1; i >= 0; i-- { + // Generate elementary reflector H_i to annihilate A[0:m-k+i-1, n-k+i]. + aii, tau[i] = impl.Dlarfg(m-k+i+1, a[(m-k+i)*lda+n-k+i], a[n-k+i:], lda) + + // Apply H_i to A[0:m-k+i, 0:n-k+i-1] from the left. + a[(m-k+i)*lda+n-k+i] = 1 + impl.Dlarf(blas.Left, m-k+i+1, n-k+i, a[n-k+i:], lda, tau[i], a, lda, work) + a[(m-k+i)*lda+n-k+i] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go new file mode 100644 index 00000000..6949da96 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go @@ -0,0 +1,186 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgeqp3 computes a QR factorization with column pivoting of the +// m×n matrix A: A*P = Q*R using Level 3 BLAS. +// +// The matrix Q is represented as a product of elementary reflectors +// Q = H_0 H_1 . . . H_{k-1}, where k = min(m,n). +// Each H_i has the form +// H_i = I - tau * v * v^T +// where tau and v are real vectors with v[0:i-1] = 0 and v[i] = 1; +// v[i:m] is stored on exit in A[i:m, i], and tau in tau[i]. +// +// jpvt specifies a column pivot to be applied to A. If +// jpvt[j] is at least zero, the jth column of A is permuted +// to the front of A*P (a leading column), if jpvt[j] is -1 +// the jth column of A is a free column. If jpvt[j] < -1, Dgeqp3 +// will panic. On return, jpvt holds the permutation that was +// applied; the jth column of A*P was the jpvt[j] column of A. +// jpvt must have length n or Dgeqp3 will panic. +// +// tau holds the scalar factors of the elementary reflectors. +// It must have length min(m, n), otherwise Dgeqp3 will panic. +// +// work must have length at least max(1,lwork), and lwork must be at least +// 3*n+1, otherwise Dgeqp3 will panic. For optimal performance lwork must +// be at least 2*n+(n+1)*nb, where nb is the optimal blocksize. On return, +// work[0] will contain the optimal value of lwork. +// +// If lwork == -1, instead of performing Dgeqp3, only the optimal value of lwork +// will be stored in work[0]. +// +// Dgeqp3 is an internal routine. It is exported for testing purposes. 
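A minimal sketch of the column-pivoted QR routine Dgeqp3 documented above, assuming the vendored gonum.org/v1/gonum/lapack/gonum package; the 3x3 matrix and the names are illustrative. Setting every jpvt entry to -1 leaves all columns free, and on return jpvt records the permutation that was applied.

package main

import (
	"fmt"

	lapackimpl "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackimpl.Implementation{}

	m, n := 3, 3
	// Row-major 3x3 matrix (illustrative data).
	a := []float64{
		1, 4, 7,
		2, 5, 8,
		3, 6, 10,
	}
	jpvt := []int{-1, -1, -1} // -1 marks a free column, so any column may be pivoted
	tau := make([]float64, 3) // min(m, n)

	// Workspace query, then the pivoted factorization.
	work := make([]float64, 1)
	impl.Dgeqp3(m, n, a, n, jpvt, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgeqp3(m, n, a, n, jpvt, tau, work, len(work))

	// jpvt now records the permutation: column j of A*P was column jpvt[j] of A.
	fmt.Println("pivot order:", jpvt)
}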
+func (impl Implementation) Dgeqp3(m, n int, a []float64, lda int, jpvt []int, tau, work []float64, lwork int) { + const ( + inb = 1 + inbmin = 2 + ixover = 3 + ) + + minmn := min(m, n) + iws := 3*n + 1 + if minmn == 0 { + iws = 1 + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < iws && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if minmn == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(inb, "DGEQRF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(2*n + (n+1)*nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(jpvt) != n: + panic(badLenJpvt) + case len(tau) < minmn: + panic(shortTau) + } + + for _, v := range jpvt { + if v < -1 || n <= v { + panic(badJpvt) + } + } + + bi := blas64.Implementation() + + // Move initial columns up front. + var nfxd int + for j := 0; j < n; j++ { + if jpvt[j] == -1 { + jpvt[j] = j + continue + } + if j != nfxd { + bi.Dswap(m, a[j:], lda, a[nfxd:], lda) + jpvt[j], jpvt[nfxd] = jpvt[nfxd], j + } else { + jpvt[j] = j + } + nfxd++ + } + + // Factorize nfxd columns. + // + // Compute the QR factorization of nfxd columns and update remaining columns. + if nfxd > 0 { + na := min(m, nfxd) + impl.Dgeqrf(m, na, a, lda, tau, work, lwork) + iws = max(iws, int(work[0])) + if na < n { + impl.Dormqr(blas.Left, blas.Trans, m, n-na, na, a, lda, tau[:na], a[na:], lda, + work, lwork) + iws = max(iws, int(work[0])) + } + } + + if nfxd >= minmn { + work[0] = float64(iws) + return + } + + // Factorize free columns. + sm := m - nfxd + sn := n - nfxd + sminmn := minmn - nfxd + + // Determine the block size. + nb = impl.Ilaenv(inb, "DGEQRF", " ", sm, sn, -1, -1) + nbmin := 2 + nx := 0 + + if 1 < nb && nb < sminmn { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(ixover, "DGEQRF", " ", sm, sn, -1, -1)) + + if nx < sminmn { + // Determine if workspace is large enough for blocked code. + minws := 2*sn + (sn+1)*nb + iws = max(iws, minws) + if lwork < minws { + // Not enough workspace to use optimal nb. Reduce + // nb and determine the minimum value of nb. + nb = (lwork - 2*sn) / (sn + 1) + nbmin = max(2, impl.Ilaenv(inbmin, "DGEQRF", " ", sm, sn, -1, -1)) + } + } + } + + // Initialize partial column norms. + // The first n elements of work store the exact column norms. + for j := nfxd; j < n; j++ { + work[j] = bi.Dnrm2(sm, a[nfxd*lda+j:], lda) + work[n+j] = work[j] + } + j := nfxd + if nbmin <= nb && nb < sminmn && nx < sminmn { + // Use blocked code initially. + + // Compute factorization. + var fjb int + for topbmn := minmn - nx; j < topbmn; j += fjb { + jb := min(nb, topbmn-j) + + // Factorize jb columns among columns j:n. + fjb = impl.Dlaqps(m, n-j, j, jb, a[j:], lda, jpvt[j:], tau[j:], + work[j:n], work[j+n:2*n], work[2*n:2*n+jb], work[2*n+jb:], jb) + } + } + + // Use unblocked code to factor the last or only block. + if j < minmn { + impl.Dlaqp2(m, n-j, j, a[j:], lda, jpvt[j:], tau[j:], + work[j:n], work[j+n:2*n], work[2*n:]) + } + + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go new file mode 100644 index 00000000..3e35d7e2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go @@ -0,0 +1,76 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgeqr2 computes a QR factorization of the m×n matrix A. +// +// In a QR factorization, Q is an m×m orthonormal matrix, and R is an +// upper triangular m×n matrix. +// +// A is modified to contain the information to construct Q and R. +// The upper triangle of a contains the matrix R. The lower triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length at least min(m,n), and +// this function will panic otherwise. +// +// The ith elementary reflector can be explicitly constructed by first extracting +// the +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// and computing H_i = I - tau[i] * v * v^T. +// +// The orthonormal matrix Q can be constructed from a product of these elementary +// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). +// +// work is temporary storage of length at least n and this function will panic otherwise. +// +// Dgeqr2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeqr2(m, n int, a []float64, lda int, tau, work []float64) { + // TODO(btracey): This is oriented such that columns of a are eliminated. + // This likely could be re-arranged to take better advantage of row-major + // storage. + + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case len(work) < n: + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + for i := 0; i < k; i++ { + // Generate elementary reflector H_i. + a[i*lda+i], tau[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min((i+1), m-1)*lda+i:], lda) + if i < n-1 { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(blas.Left, m-i, n-i-1, + a[i*lda+i:], lda, + tau[i], + a[i*lda+i+1:], lda, + work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go new file mode 100644 index 00000000..300f8eea --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go @@ -0,0 +1,108 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgeqrf computes the QR factorization of the m×n matrix A using a blocked +// algorithm. See the documentation for Dgeqr2 for a description of the +// parameters at entry and exit. +// +// work is temporary storage, and lwork specifies the usable memory length. +// The length of work must be at least max(1, lwork) and lwork must be -1 +// or at least n, otherwise this function will panic. +// Dgeqrf is a blocked QR factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgeqrf, +// the optimal work length will be stored into work[0]. +// +// tau must have length at least min(m,n), and this function will panic otherwise. 
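A minimal sketch of the blocked QR routine Dgeqrf documented above, assuming the vendored gonum.org/v1/gonum/lapack/gonum package; the 4x3 matrix and the names are illustrative. As with the other factorizations, a workspace query with lwork == -1 precedes the real call.

package main

import (
	"fmt"

	lapackimpl "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackimpl.Implementation{}

	m, n := 4, 3
	// Row-major 4x3 matrix (illustrative data).
	a := []float64{
		1, 0, 1,
		0, 2, 1,
		1, 1, 0,
		2, 0, 3,
	}
	tau := make([]float64, 3) // min(m, n)

	// Workspace query, then the factorization itself.
	work := make([]float64, 1)
	impl.Dgeqrf(m, n, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgeqrf(m, n, a, n, tau, work, len(work))

	// The upper triangle of a now holds R; the entries below the diagonal
	// and tau encode the elementary reflectors that define Q.
	fmt.Println("R and reflector data:", a)
}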
+func (impl Implementation) Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + work[0] = 1 + return + } + + // nb is the optimal blocksize, i.e. the number of columns transformed at a time. + nb := impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(n * nb) + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(tau) < k { + panic(shortTau) + } + + nbmin := 2 // Minimal block size. + var nx int // Use unblocked (unless changed in the next for loop) + iws := n + // Only consider blocked if the suggested block size is > 1 and the + // number of rows or columns is sufficiently large. + if 1 < nb && nb < k { + // nx is the block size at which the code switches from blocked + // to unblocked. + nx = max(0, impl.Ilaenv(3, "DGEQRF", " ", m, n, -1, -1)) + if k > nx { + iws = n * nb + if lwork < iws { + // Not enough workspace to use the optimal block + // size. Get the minimum block size instead. + nb = lwork / n + nbmin = max(2, impl.Ilaenv(2, "DGEQRF", " ", m, n, -1, -1)) + } + } + } + + // Compute QR using a blocked algorithm. + var i int + if nbmin <= nb && nb < k && nx < k { + ldwork := nb + for i = 0; i < k-nx; i += nb { + ib := min(k-i, nb) + // Compute the QR factorization of the current block. + impl.Dgeqr2(m-i, ib, a[i*lda+i:], lda, tau[i:], work) + if i+ib < n { + // Form the triangular factor of the block reflector and apply H^T + // In Dlarft, work becomes the T matrix. + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, + m-i, n-i-ib, ib, + a[i*lda+i:], lda, + work, ldwork, + a[i*lda+i+ib:], lda, + work[ib*ldwork:], ldwork) + } + } + } + // Call unblocked code on the remaining columns. + if i < k { + impl.Dgeqr2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go new file mode 100644 index 00000000..60dac973 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go @@ -0,0 +1,68 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgerq2 computes an RQ factorization of the m×n matrix A, +// A = R * Q. +// On exit, if m <= n, the upper triangle of the subarray +// A[0:m, n-m:n] contains the m×m upper triangular matrix R. +// If m >= n, the elements on and above the (m-n)-th subdiagonal +// contain the m×n upper trapezoidal matrix R. +// The remaining elements, with tau, represent the +// orthogonal matrix Q as a product of min(m,n) elementary +// reflectors. +// +// The matrix Q is represented as a product of elementary reflectors +// Q = H_0 H_1 . . . H_{min(m,n)-1}. +// Each H(i) has the form +// H_i = I - tau_i * v * v^T +// where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], +// v[n-k+i:n] = 0 and v[n-k+i] = 1. +// +// tau must have length min(m,n) and work must have length m, otherwise +// Dgerq2 will panic. +// +// Dgerq2 is an internal routine. 
It is exported for testing purposes. +func (impl Implementation) Dgerq2(m, n int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case len(work) < m: + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + for i := k - 1; i >= 0; i-- { + // Generate elementary reflector H[i] to annihilate + // A[m-k+i, 0:n-k+i-1]. + mki := m - k + i + nki := n - k + i + var aii float64 + aii, tau[i] = impl.Dlarfg(nki+1, a[mki*lda+nki], a[mki*lda:], 1) + + // Apply H[i] to A[0:m-k+i-1, 0:n-k+i] from the right. + a[mki*lda+nki] = 1 + impl.Dlarf(blas.Right, mki, nki+1, a[mki*lda:], 1, tau[i], a, lda, work) + a[mki*lda+nki] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go new file mode 100644 index 00000000..9b4aa050 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go @@ -0,0 +1,129 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgerqf computes an RQ factorization of the m×n matrix A, +// A = R * Q. +// On exit, if m <= n, the upper triangle of the subarray +// A[0:m, n-m:n] contains the m×m upper triangular matrix R. +// If m >= n, the elements on and above the (m-n)-th subdiagonal +// contain the m×n upper trapezoidal matrix R. +// The remaining elements, with tau, represent the +// orthogonal matrix Q as a product of min(m,n) elementary +// reflectors. +// +// The matrix Q is represented as a product of elementary reflectors +// Q = H_0 H_1 . . . H_{min(m,n)-1}. +// Each H(i) has the form +// H_i = I - tau_i * v * v^T +// where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], +// v[n-k+i:n] = 0 and v[n-k+i] = 1. +// +// tau must have length min(m,n), work must have length max(1, lwork), +// and lwork must be -1 or at least max(1, m), otherwise Dgerqf will panic. +// On exit, work[0] will contain the optimal length for work. +// +// Dgerqf is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgerqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, m) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DGERQF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(m * nb) + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(tau) != k { + panic(badLenTau) + } + + nbmin := 2 + nx := 1 + iws := m + var ldwork int + if 1 < nb && nb < k { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(3, "DGERQF", " ", m, n, -1, -1)) + if nx < k { + // Determine whether workspace is large enough for blocked code. + iws = m * nb + if lwork < iws { + // Not enough workspace to use optimal nb. Reduce + // nb and determine the minimum value of nb. 
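A minimal sketch of the RQ routine Dgerqf documented above, assuming the vendored gonum.org/v1/gonum/lapack/gonum package; the 2x4 matrix and the names are illustrative. Note that this implementation requires len(tau) to be exactly min(m,n).

package main

import (
	"fmt"

	lapackimpl "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackimpl.Implementation{}

	m, n := 2, 4
	// Row-major 2x4 matrix (illustrative data).
	a := []float64{
		1, 2, 3, 4,
		0, 1, 2, 5,
	}
	tau := make([]float64, 2) // exactly min(m, n)

	// Workspace query, then the factorization itself.
	work := make([]float64, 1)
	impl.Dgerqf(m, n, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgerqf(m, n, a, n, tau, work, len(work))

	// With m <= n, the upper triangle of A[0:m, n-m:n] now holds R; the
	// remaining entries and tau encode the orthogonal factor Q.
	fmt.Println("R and reflector data:", a)
	fmt.Println("tau:", tau)
}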
+ nb = lwork / m + nbmin = max(2, impl.Ilaenv(2, "DGERQF", " ", m, n, -1, -1)) + } + ldwork = nb + } + } + + var mu, nu int + if nbmin <= nb && nb < k && nx < k { + // Use blocked code initially. + // The last kk rows are handled by the block method. + ki := ((k - nx - 1) / nb) * nb + kk := min(k, ki+nb) + + var i int + for i = k - kk + ki; i >= k-kk; i -= nb { + ib := min(k-i, nb) + + // Compute the RQ factorization of the current block + // A[m-k+i:m-k+i+ib-1, 0:n-k+i+ib-1]. + impl.Dgerq2(ib, n-k+i+ib, a[(m-k+i)*lda:], lda, tau[i:], work) + if m-k+i > 0 { + // Form the triangular factor of the block reflector + // H = H_{i+ib-1} . . . H_{i+1} H_i. + impl.Dlarft(lapack.Backward, lapack.RowWise, + n-k+i+ib, ib, a[(m-k+i)*lda:], lda, tau[i:], + work, ldwork) + + // Apply H to A[0:m-k+i-1, 0:n-k+i+ib-1] from the right. + impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Backward, lapack.RowWise, + m-k+i, n-k+i+ib, ib, a[(m-k+i)*lda:], lda, + work, ldwork, + a, lda, + work[ib*ldwork:], ldwork) + } + } + mu = m - k + i + nb + nu = n - k + i + nb + } else { + mu = m + nu = n + } + + // Use unblocked code to factor the last or only block. + if mu > 0 && nu > 0 { + impl.Dgerq2(mu, nu, a, lda, tau, work) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go new file mode 100644 index 00000000..136f683e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go @@ -0,0 +1,1374 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +const noSVDO = "dgesvd: not coded for overwrite" + +// Dgesvd computes the singular value decomposition of the input matrix A. +// +// The singular value decomposition is +// A = U * Sigma * V^T +// where Sigma is an m×n diagonal matrix containing the singular values of A, +// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first +// min(m,n) columns of U and V are the left and right singular vectors of A +// respectively. +// +// jobU and jobVT are options for computing the singular vectors. The behavior +// is as follows +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDStore The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// The behavior is the same for jobVT and the rows of V^T. At most one of jobU +// and jobVT can equal lapack.SVDOverwrite, and Dgesvd will panic otherwise. +// +// On entry, a contains the data for the m×n matrix A. During the call to Dgesvd +// the data is overwritten. On exit, A contains the appropriate singular vectors +// if either job is lapack.SVDOverwrite. +// +// s is a slice of length at least min(m,n) and on exit contains the singular +// values in decreasing order. +// +// u contains the left singular vectors on exit, stored column-wise. If +// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDStore u is +// of size m×min(m,n). If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is +// not used. +// +// vt contains the left singular vectors on exit, stored row-wise. If +// jobV == lapack.SVDAll, vt is of size n×n. If jobVT == lapack.SVDStore vt is +// of size min(m,n)×n. 
If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is +// not used. +// +// work is a slice for storing temporary memory, and lwork is the usable size of +// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)). +// If lwork == -1, instead of performing Dgesvd, the optimal work length will be +// stored into work[0]. Dgesvd will panic if the working memory has insufficient +// storage. +// +// Dgesvd returns whether the decomposition successfully completed. +func (impl Implementation) Dgesvd(jobU, jobVT lapack.SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool) { + if jobU == lapack.SVDOverwrite || jobVT == lapack.SVDOverwrite { + panic(noSVDO) + } + + wantua := jobU == lapack.SVDAll + wantus := jobU == lapack.SVDStore + wantuas := wantua || wantus + wantuo := jobU == lapack.SVDOverwrite + wantun := jobU == lapack.SVDNone + if !(wantua || wantus || wantuo || wantun) { + panic(badSVDJob) + } + + wantva := jobVT == lapack.SVDAll + wantvs := jobVT == lapack.SVDStore + wantvas := wantva || wantvs + wantvo := jobVT == lapack.SVDOverwrite + wantvn := jobVT == lapack.SVDNone + if !(wantva || wantvs || wantvo || wantvn) { + panic(badSVDJob) + } + + if wantuo && wantvo { + panic(bothSVDOver) + } + + minmn := min(m, n) + minwork := 1 + if minmn > 0 { + minwork = max(3*minmn+max(m, n), 5*minmn) + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldu < 1, wantua && ldu < m, wantus && ldu < minmn: + panic(badLdU) + case ldvt < 1 || (wantvas && ldvt < n): + panic(badLdVT) + case lwork < minwork && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if minmn == 0 { + work[0] = 1 + return true + } + + // Compute optimal workspace size for subroutines. 
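A minimal sketch of a thin SVD with Dgesvd as documented above, assuming the vendored gonum packages; the 4x3 matrix and the names are illustrative. Both job arguments use lapack.SVDStore, since lapack.SVDOverwrite panics in this version (see noSVDO above).

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	lapackimpl "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackimpl.Implementation{}

	m, n := 4, 3
	minmn := 3 // min(m, n)
	// Row-major 4x3 matrix (illustrative data).
	a := []float64{
		1, 0, 0,
		0, 2, 0,
		0, 0, 3,
		1, 1, 1,
	}
	s := make([]float64, minmn)    // singular values, decreasing order
	u := make([]float64, m*minmn)  // m x min(m,n) left singular vectors, ldu = minmn
	vt := make([]float64, minmn*n) // min(m,n) x n rows of V^T, ldvt = n

	// Workspace query, then the decomposition.
	work := make([]float64, 1)
	impl.Dgesvd(lapack.SVDStore, lapack.SVDStore, m, n, a, n, s, u, minmn, vt, n, work, -1)
	work = make([]float64, int(work[0]))
	ok := impl.Dgesvd(lapack.SVDStore, lapack.SVDStore, m, n, a, n, s, u, minmn, vt, n, work, len(work))
	if !ok {
		fmt.Println("SVD did not converge")
		return
	}
	// vt holds V^T, so its rows are the right singular vectors.
	fmt.Println("singular values:", s)
}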
+ opts := string(jobU) + string(jobVT) + mnthr := impl.Ilaenv(6, "DGESVD", opts, m, n, 0, 0) + maxwrk := 1 + var wrkbl, bdspac int + if m >= n { + bdspac = 5 * n + impl.Dgeqrf(m, n, a, lda, nil, work, -1) + lwork_dgeqrf := int(work[0]) + + impl.Dorgqr(m, n, n, a, lda, nil, work, -1) + lwork_dorgqr_n := int(work[0]) + impl.Dorgqr(m, m, n, a, lda, nil, work, -1) + lwork_dorgqr_m := int(work[0]) + + impl.Dgebrd(n, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + + impl.Dorgbr(lapack.GeneratePT, n, n, n, a, lda, nil, work, -1) + lwork_dorgbr_p := int(work[0]) + + impl.Dorgbr(lapack.GenerateQ, n, n, n, a, lda, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + + if m >= mnthr { + if wantun { + // Path 1 (m much larger than n, jobU == None) + maxwrk = n + lwork_dgeqrf + maxwrk = max(maxwrk, 3*n+lwork_dgebrd) + if wantvo || wantvas { + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) + } + maxwrk = max(maxwrk, bdspac) + } else if wantuo && wantvn { + // Path 2 (m much larger than n, jobU == Overwrite, jobVT == None) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(n*n+wrkbl, n*n+m*n+n) + } else if wantuo && wantvas { + // Path 3 (m much larger than n, jobU == Overwrite, jobVT == Store or All) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(n*n+wrkbl, n*n+m*n+n) + } else if wantus && wantvn { + // Path 4 (m much larger than n, jobU == Store, jobVT == None) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantus && wantvo { + // Path 5 (m much larger than n, jobU == Store, jobVT == Overwrite) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*n*n + wrkbl + } else if wantus && wantvas { + // Path 6 (m much larger than n, jobU == Store, jobVT == Store or All) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantua && wantvn { + // Path 7 (m much larger than n, jobU == All, jobVT == None) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantua && wantvo { + // Path 8 (m much larger than n, jobU == All, jobVT == Overwrite) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*n*n + wrkbl + } else if wantua && wantvas { + // Path 9 (m much larger than n, jobU == All, jobVT == Store or All) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = 
n*n + wrkbl + } + } else { + // Path 10 (m at least n, but not much larger) + impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + maxwrk = 3*n + lwork_dgebrd + if wantus || wantuo { + impl.Dorgbr(lapack.GenerateQ, m, n, n, a, lda, nil, work, -1) + lwork_dorgbr_q = int(work[0]) + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) + } + if wantua { + impl.Dorgbr(lapack.GenerateQ, m, m, n, a, lda, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) + } + if !wantvn { + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) + } + maxwrk = max(maxwrk, bdspac) + } + } else { + bdspac = 5 * m + + impl.Dgelqf(m, n, a, lda, nil, work, -1) + lwork_dgelqf := int(work[0]) + + impl.Dorglq(n, n, m, nil, n, nil, work, -1) + lwork_dorglq_n := int(work[0]) + impl.Dorglq(m, n, m, a, lda, nil, work, -1) + lwork_dorglq_m := int(work[0]) + + impl.Dgebrd(m, m, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + + impl.Dorgbr(lapack.GeneratePT, m, m, m, a, n, nil, work, -1) + lwork_dorgbr_p := int(work[0]) + + impl.Dorgbr(lapack.GenerateQ, m, m, m, a, n, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + + if n >= mnthr { + if wantvn { + // Path 1t (n much larger than m, jobVT == None) + maxwrk = m + lwork_dgelqf + maxwrk = max(maxwrk, 3*m+lwork_dgebrd) + if wantuo || wantuas { + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q) + } + maxwrk = max(maxwrk, bdspac) + } else if wantvo && wantun { + // Path 2t (n much larger than m, jobU == None, jobVT == Overwrite) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(m*m+wrkbl, m*m+m*n+m) + } else if wantvo && wantuas { + // Path 3t (n much larger than m, jobU == Store or All, jobVT == Overwrite) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(m*m+wrkbl, m*m+m*n+m) + } else if wantvs && wantun { + // Path 4t (n much larger than m, jobU == None, jobVT == Store) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantvs && wantuo { + // Path 5t (n much larger than m, jobU == Overwrite, jobVT == Store) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*m*m + wrkbl + } else if wantvs && wantuas { + // Path 6t (n much larger than m, jobU == Store or All, jobVT == Store) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantva && wantun { + // Path 7t (n much larger than m, jobU== None, jobVT == All) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantva && wantuo { + // Path 8t (n much larger than m, jobU == Overwrite, jobVT == All) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = 
max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*m*m + wrkbl + } else if wantva && wantuas { + // Path 9t (n much larger than m, jobU == Store or All, jobVT == All) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } + } else { + // Path 10t (n greater than m, but not much larger) + impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd = int(work[0]) + maxwrk = 3*m + lwork_dgebrd + if wantvs || wantvo { + impl.Dorgbr(lapack.GeneratePT, m, n, m, a, n, nil, work, -1) + lwork_dorgbr_p = int(work[0]) + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) + } + if wantva { + impl.Dorgbr(lapack.GeneratePT, n, n, m, a, n, nil, work, -1) + lwork_dorgbr_p = int(work[0]) + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) + } + if !wantun { + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q) + } + maxwrk = max(maxwrk, bdspac) + } + } + + maxwrk = max(maxwrk, minwork) + if lwork == -1 { + work[0] = float64(maxwrk) + return true + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(s) < minmn { + panic(shortS) + } + if (len(u) < (m-1)*ldu+m && wantua) || (len(u) < (m-1)*ldu+minmn && wantus) { + panic(shortU) + } + if (len(vt) < (n-1)*ldvt+n && wantva) || (len(vt) < (minmn-1)*ldvt+n && wantvs) { + panic(shortVT) + } + + // Perform decomposition. + eps := dlamchE + smlnum := math.Sqrt(dlamchS) / eps + bignum := 1 / smlnum + + // Scale A if max element outside range [smlnum, bignum]. + anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil) + var iscl bool + if anrm > 0 && anrm < smlnum { + iscl = true + impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda) + } else if anrm > bignum { + iscl = true + impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda) + } + + bi := blas64.Implementation() + var ie int + if m >= n { + // If A has sufficiently more rows than columns, use the QR decomposition. + if m >= mnthr { + // m >> n + if wantun { + // Path 1. + itau := 0 + iwork := itau + n + + // Compute A = Q * R. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Zero out below R. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + ie = 0 + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + ncvt := 0 + if wantvo || wantvas { + impl.Dorgbr(lapack.GeneratePT, n, n, n, a, lda, work[itaup:], + work[iwork:], lwork-iwork) + ncvt = n + } + iwork = ie + n + + // Perform bidiagonal QR iteration computing right singular vectors + // of A in A if desired. + ok = impl.Dbdsqr(blas.Upper, n, ncvt, 0, 0, s, work[ie:], + a, lda, work, 1, work, 1, work[iwork:]) + + // If right singular vectors desired in VT, copy them there. + if wantvas { + impl.Dlacpy(blas.All, n, n, a, lda, vt, ldvt) + } + } else if wantuo && wantvn { + // Path 2 + panic(noSVDO) + } else if wantuo && wantvas { + // Path 3 + panic(noSVDO) + } else if wantus { + if wantvn { + // Path 4 + if lwork >= n*n+max(4*n, bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*n { + ldworkr = lda + } else { + ldworkr = n + } + itau := ir + ldworkr*n + iwork := itau + n + // Compute A = Q * R. 
+ impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy R to work[ir:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr) + + // Generate Q in A. + impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[ir:]. + impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate left vectors bidiagonalizing R in work[ir:]. + impl.Dorgbr(lapack.GenerateQ, n, n, n, work[ir:], ldworkr, + work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, compuing left singular + // vectors of R in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1, + work[ir:], ldworkr, work, 1, work[iwork:]) + + // Multiply Q in A by left singular vectors of R in + // work[ir:], storing result in U. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda, + work[ir:], ldworkr, 0, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Zero out below R in A. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left vectors bidiagonalizing R. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left + // singular vectors of A in U. + ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], work, 1, + u, ldu, work, 1, work[iwork:]) + } + } else if wantvo { + // Path 5 + panic(noSVDO) + } else if wantvas { + // Path 6 + if lwork >= n*n+max(4*n, bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + var ldworku int + if lwork >= wrkbl+lda*n { + ldworku = lda + } else { + ldworku = n + } + itau := iu + ldworku*n + iwork := itau + n + + // Compute A = Q * R. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + // Copy R to work[iu:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku) + + // Generate Q in A. + impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[iu:], copying result to VT. + impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt) + + // Generate left bidiagonalizing vectors in work[iu:]. + impl.Dorgbr(lapack.GenerateQ, n, n, n, work[iu:], ldworku, + work[itauq:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. 
+ impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[iu:], and computing right singular + // vectors of R in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:], + vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:]) + + // Multiply Q in A by left singular vectors of R in + // work[iu:], storing result in U. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda, + work[iu:], ldworku, 0, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q * R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + + // Copy R to VT, zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt) + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in VT. + impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in VT. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } else if wantua { + if wantvn { + // Path 7 + if lwork >= n*n+max(max(n+m, 4*n), bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*n { + ldworkr = lda + } else { + ldworkr = n + } + itau := ir + ldworkr*n + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Copy R to work[ir:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[ir:]. + impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in work[ir:]. + impl.Dorgbr(lapack.GenerateQ, n, n, n, work[ir:], ldworkr, + work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1, + work[ir:], ldworkr, work, 1, work[iwork:]) + + // Multiply Q in U by left singular vectors of R in + // work[ir:], storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, u, ldu, + work[ir:], ldworkr, 0, a, lda) + + // Copy left singular vectors of A from A to U. + impl.Dlacpy(blas.All, m, n, a, lda, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. 
+ itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Zero out below R in A. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in A. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left + // singular vectors of A in U. + ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], + work, 1, u, ldu, work, 1, work[iwork:]) + } + } else if wantvo { + // Path 8. + panic(noSVDO) + } else if wantvas { + // Path 9. + if lwork >= n*n+max(max(n+m, 4*n), bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + var ldworku int + if lwork >= wrkbl+lda*n { + ldworku = lda + } else { + ldworku = n + } + itau := iu + ldworku*n + iwork := itau + n + + // Compute A = Q * R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + + // Copy R to work[iu:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku) + + ie = itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[iu:], copying result to VT. + impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt) + + // Generate left bidiagonalizing vectors in work[iu:]. + impl.Dorgbr(lapack.GenerateQ, n, n, n, work[iu:], ldworku, + work[itauq:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[iu:] and computing right + // singular vectors of R in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:], + vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:]) + + // Multiply Q in U by left singular vectors of R in + // work[iu:], storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, + u, ldu, work[iu:], ldworku, 0, a, lda) + + // Copy left singular vectors of A from A to U. + impl.Dlacpy(blas.All, m, n, a, lda, u, ldu) + + /* + // Bidiagonalize R in VT. + impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in VT. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, + m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. 
+ ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + */ + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + + // Copy R from A to VT, zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) + if n > 1 { + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt) + } + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in VT. + impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in VT. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, + m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + + // Generate right bidiagonizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } + } else { + // Path 10. + // M at least N, but not much larger. + ie = 0 + itauq := ie + n + itaup := itauq + n + iwork := itaup + n + + // Bidiagonalize A. + impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + if wantuas { + // Left singular vectors are desired in U. Copy result to U and + // generate left biadiagonalizing vectors in U. + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + var ncu int + if wantus { + ncu = n + } + if wantua { + ncu = m + } + impl.Dorgbr(lapack.GenerateQ, m, ncu, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + } + if wantvas { + // Right singular vectors are desired in VT. Copy result to VT and + // generate left biadiagonalizing vectors in VT. + impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork) + } + if wantuo { + panic(noSVDO) + } + if wantvo { + panic(noSVDO) + } + iwork = ie + n + var nru, ncvt int + if wantuas || wantuo { + nru = m + } + if wantun { + nru = 0 + } + if wantvas || wantvo { + ncvt = n + } + if wantvn { + ncvt = 0 + } + if !wantuo && !wantvo { + // Perform bidiagonal QR iteration, if desired, computing left + // singular vectors in U and right singular vectors in VT. + ok = impl.Dbdsqr(blas.Upper, n, ncvt, nru, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } else { + // There will be two branches when the implementation is complete. + panic(noSVDO) + } + } + } else { + // A has more columns than rows. If A has sufficiently more columns than + // rows, first reduce using the LQ decomposition. + if n >= mnthr { + // n >> m. + if wantvn { + // Path 1t. + itau := 0 + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Zero out above L. + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) + ie := 0 + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in A. 
+ impl.Dgebrd(m, m, a, lda, s, work[ie:itauq], + work[itauq:itaup], work[itaup:iwork], work[iwork:], lwork-iwork) + if wantuo || wantuas { + impl.Dorgbr(lapack.GenerateQ, m, m, m, a, lda, + work[itauq:], work[iwork:], lwork-iwork) + } + iwork = ie + m + nru := 0 + if wantuo || wantuas { + nru = m + } + + // Perform bidiagonal QR iteration, computing left singular vectors + // of A in A if desired. + ok = impl.Dbdsqr(blas.Upper, m, 0, nru, 0, s, work[ie:], + work, 1, a, lda, work, 1, work[iwork:]) + + // If left singular vectors desired in U, copy them there. + if wantuas { + impl.Dlacpy(blas.All, m, m, a, lda, u, ldu) + } + } else if wantvo && wantun { + // Path 2t. + panic(noSVDO) + } else if wantvo && wantuas { + // Path 3t. + panic(noSVDO) + } else if wantvs { + if wantun { + // Path 4t. + if lwork >= m*m+max(4*m, bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*m { + ldworkr = lda + } else { + ldworkr = m + } + itau := ir + ldworkr*m + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to work[ir:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr) + + // Generate Q in A. + impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[ir:]. + impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate right vectors bidiagonalizing L in work[ir:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[ir:], ldworkr, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right singular + // vectors of L in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:], + work[ir:], ldworkr, work, 1, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[ir:] by + // Q in A, storing result in VT. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[ir:], ldworkr, a, lda, 0, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy result to VT. + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Zero out above L in A. + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) + + // Bidiagonalize L in A. + impl.Dgebrd(m, m, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right vectors bidiagonalizing L by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right + // singular vectors of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:], + vt, ldvt, work, 1, work, 1, work[iwork:]) + } + } else if wantuo { + // Path 5t. + panic(noSVDO) + } else if wantuas { + // Path 6t. + if lwork >= m*m+max(4*m, bdspac) { + // Sufficient workspace for a fast algorithm. 
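+ // The fast path below copies L into the workspace work[iu:] as an m×m block
+ // with leading dimension ldworku (lda when the workspace is large enough),
+ // generates Q in A, and bidiagonalizes the copy of L so that its left
+ // singular vectors accumulate in U; VT is then formed by multiplying the
+ // right singular vectors of L by Q.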
+ iu := 0 + var ldworku int + if lwork >= wrkbl+lda*m { + ldworku = lda + } else { + ldworku = m + } + itau := iu + ldworku*m + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to work[iu:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku) + + // Generate Q in A. + impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[iu:], copying result to U. + impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu) + + // Generate right bidiagionalizing vectors in work[iu:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[iu:], ldworku, + work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of L in U and computing right singular vectors of + // L in work[iu:]. + ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:], + work[iu:], ldworku, u, ldu, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[iu:] by + // Q in A, storing result in VT. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[iu:], ldworku, a, lda, 0, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L*Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to U, zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in U. + impl.Dgebrd(m, m, u, ldu, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in U by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:], vt, ldvt, + u, ldu, work, 1, work[iwork:]) + } + } + } else if wantva { + if wantun { + // Path 7t. + if lwork >= m*m+max(max(n+m, 4*m), bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*m { + ldworkr = lda + } else { + ldworkr = m + } + itau := ir + ldworkr*m + iwork := itau + m + + // Compute A = L*Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Copy L to work[ir:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr) + + // Generate Q in VT. 
+ impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[ir:]. + impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in work[ir:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[ir:], ldworkr, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right + // singular vectors of L in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:], + work[ir:], ldworkr, work, 1, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[ir:] by + // Q in VT, storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[ir:], ldworkr, vt, ldvt, 0, a, lda) + + // Copy right singular vectors of A from A to VT. + impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + // Compute A = L * Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Zero out above L in A. + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) + + // Bidiagonalize L in A. + impl.Dgebrd(m, m, a, lda, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in A by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right singular + // vectors of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:], + vt, ldvt, work, 1, work, 1, work[iwork:]) + } + } else if wantuo { + panic(noSVDO) + } else if wantuas { + // Path 9t. + if lwork >= m*m+max(max(m+n, 4*m), bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + + var ldworku int + if lwork >= wrkbl+lda*m { + ldworku = lda + } else { + ldworku = m + } + itau := iu + ldworku*m + iwork := itau + m + + // Generate A = L * Q copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to work[iu:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku) + ie = itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[iu:], copying result to U. + impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu) + + // Generate right bidiagonalizing vectors in work[iu:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[iu:], ldworku, + work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of L in U and computing right singular vectors + // of L in work[iu:]. 
+ ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:], + work[iu:], ldworku, u, ldu, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[iu:] + // Q in VT, storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[iu:], ldworku, vt, ldvt, 0, a, lda) + + // Copy right singular vectors of A from A to VT. + impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L * Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to U, zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu) + + ie = itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in U. + impl.Dgebrd(m, m, u, ldu, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in U by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } + } else { + // Path 10t. + // N at least M, but not much larger. + ie = 0 + itauq := ie + m + itaup := itauq + m + iwork := itaup + m + + // Bidiagonalize A. + impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + if wantuas { + // If left singular vectors desired in U, copy result to U and + // generate left bidiagonalizing vectors in U. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dorgbr(lapack.GenerateQ, m, m, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + } + if wantvas { + // If right singular vectors desired in VT, copy result to VT + // and generate right bidiagonalizing vectors in VT. + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + var nrvt int + if wantva { + nrvt = n + } else { + nrvt = m + } + impl.Dorgbr(lapack.GeneratePT, nrvt, n, m, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork) + } + if wantuo { + panic(noSVDO) + } + if wantvo { + panic(noSVDO) + } + iwork = ie + m + var nru, ncvt int + if wantuas || wantuo { + nru = m + } + if wantvas || wantvo { + ncvt = n + } + if !wantuo && !wantvo { + // Perform bidiagonal QR iteration, if desired, computing left + // singular vectors in U and computing right singular vectors in + // VT. + ok = impl.Dbdsqr(blas.Lower, m, ncvt, nru, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } else { + // There will be two branches when the implementation is complete. + panic(noSVDO) + } + } + } + if !ok { + if ie > 1 { + for i := 0; i < minmn-1; i++ { + work[i+1] = work[i+ie] + } + } + if ie < 1 { + for i := minmn - 2; i >= 0; i-- { + work[i+1] = work[i+ie] + } + } + } + // Undo scaling if necessary. 
+ if iscl { + if anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bignum, anrm, 1, minmn, s, minmn) + } + if !ok && anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bignum, anrm, 1, minmn-1, work[1:], minmn) + } + if anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, 1, minmn, s, minmn) + } + if !ok && anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, 1, minmn-1, work[1:], minmn) + } + } + work[0] = float64(maxwrk) + return ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go new file mode 100644 index 00000000..63ad72e9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go @@ -0,0 +1,84 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetf2 computes the LU decomposition of the m×n matrix A. +// The LU decomposition is a factorization of a into +// A = P * L * U +// where P is a permutation matrix, L is a unit lower triangular matrix, and +// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored +// in place into a. +// +// ipiv is a permutation vector. It indicates that row i of the matrix was +// changed with ipiv[i]. ipiv must have length at least min(m,n), and will panic +// otherwise. ipiv is zero-indexed. +// +// Dgetf2 returns whether the matrix A is singular. The LU decomposition will +// be computed regardless of the singularity of A, but division by zero +// will occur if the false is returned and the result is used to solve a +// system of equations. +// +// Dgetf2 is an internal routine. It is exported for testing purposes. +func (Implementation) Dgetf2(m, n int, a []float64, lda int, ipiv []int) (ok bool) { + mn := min(m, n) + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if mn == 0 { + return true + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(ipiv) != mn: + panic(badLenIpiv) + } + + bi := blas64.Implementation() + + sfmin := dlamchS + ok = true + for j := 0; j < mn; j++ { + // Find a pivot and test for singularity. + jp := j + bi.Idamax(m-j, a[j*lda+j:], lda) + ipiv[j] = jp + if a[jp*lda+j] == 0 { + ok = false + } else { + // Swap the rows if necessary. + if jp != j { + bi.Dswap(n, a[j*lda:], 1, a[jp*lda:], 1) + } + if j < m-1 { + aj := a[j*lda+j] + if math.Abs(aj) >= sfmin { + bi.Dscal(m-j-1, 1/aj, a[(j+1)*lda+j:], lda) + } else { + for i := 0; i < m-j-1; i++ { + a[(j+1)*lda+j] = a[(j+1)*lda+j] / a[lda*j+j] + } + } + } + } + if j < mn-1 { + bi.Dger(m-j-1, n-j-1, -1, a[(j+1)*lda+j:], lda, a[j*lda+j+1:], 1, a[(j+1)*lda+j+1:], lda) + } + } + return ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go new file mode 100644 index 00000000..ad01e71e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go @@ -0,0 +1,85 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetrf computes the LU decomposition of the m×n matrix A. 
+// The LU decomposition is a factorization of A into +// A = P * L * U +// where P is a permutation matrix, L is a unit lower triangular matrix, and +// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored +// in place into a. +// +// ipiv is a permutation vector. It indicates that row i of the matrix was +// changed with ipiv[i]. ipiv must have length at least min(m,n), and will panic +// otherwise. ipiv is zero-indexed. +// +// Dgetrf is the blocked version of the algorithm. +// +// Dgetrf returns whether the matrix A is singular. The LU decomposition will +// be computed regardless of the singularity of A, but division by zero +// will occur if the false is returned and the result is used to solve a +// system of equations. +func (impl Implementation) Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool) { + mn := min(m, n) + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if mn == 0 { + return true + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(ipiv) != mn: + panic(badLenIpiv) + } + + bi := blas64.Implementation() + + nb := impl.Ilaenv(1, "DGETRF", " ", m, n, -1, -1) + if nb <= 1 || mn <= nb { + // Use the unblocked algorithm. + return impl.Dgetf2(m, n, a, lda, ipiv) + } + ok = true + for j := 0; j < mn; j += nb { + jb := min(mn-j, nb) + blockOk := impl.Dgetf2(m-j, jb, a[j*lda+j:], lda, ipiv[j:j+jb]) + if !blockOk { + ok = false + } + for i := j; i <= min(m-1, j+jb-1); i++ { + ipiv[i] = j + ipiv[i] + } + impl.Dlaswp(j, a, lda, j, j+jb-1, ipiv[:j+jb], 1) + if j+jb < n { + impl.Dlaswp(n-j-jb, a[j+jb:], lda, j, j+jb-1, ipiv[:j+jb], 1) + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit, + jb, n-j-jb, 1, + a[j*lda+j:], lda, + a[j*lda+j+jb:], lda) + if j+jb < m { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m-j-jb, n-j-jb, jb, -1, + a[(j+jb)*lda+j:], lda, + a[j*lda+j+jb:], lda, + 1, a[(j+jb)*lda+j+jb:], lda) + } + } + } + return ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go new file mode 100644 index 00000000..b2f2ae46 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go @@ -0,0 +1,116 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetri computes the inverse of the matrix A using the LU factorization computed +// by Dgetrf. On entry, a contains the PLU decomposition of A as computed by +// Dgetrf and on exit contains the reciprocal of the original matrix. +// +// Dgetri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= n and this function will panic otherwise. +// Dgetri is a blocked inversion, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgetri, +// the optimal work length will be stored into work[0]. 
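+//
+// A minimal usage sketch (n, a and lda are placeholders for an n×n matrix
+// stored in row-major order with stride lda >= n):
+//
+//  var impl Implementation
+//  ipiv := make([]int, n)
+//  if !impl.Dgetrf(n, n, a, lda, ipiv) {
+//      // A is singular; inv(A) does not exist.
+//  }
+//  work := make([]float64, 1)
+//  impl.Dgetri(n, a, lda, ipiv, work, -1) // workspace query
+//  work = make([]float64, int(work[0]))
+//  ok := impl.Dgetri(n, a, lda, ipiv, work, len(work)) // a holds inv(A) if ok is true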
+func (impl Implementation) Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool) { + iws := max(1, n) + switch { + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < iws && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + if n == 0 { + work[0] = 1 + return true + } + + nb := impl.Ilaenv(1, "DGETRI", " ", n, -1, -1, -1) + if lwork == -1 { + work[0] = float64(n * nb) + return true + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(ipiv) != n: + panic(badLenIpiv) + } + + // Form inv(U). + ok = impl.Dtrtri(blas.Upper, blas.NonUnit, n, a, lda) + if !ok { + return false + } + + nbmin := 2 + if 1 < nb && nb < n { + iws = max(n*nb, 1) + if lwork < iws { + nb = lwork / n + nbmin = max(2, impl.Ilaenv(2, "DGETRI", " ", n, -1, -1, -1)) + } + } + ldwork := nb + + bi := blas64.Implementation() + // Solve the equation inv(A)*L = inv(U) for inv(A). + // TODO(btracey): Replace this with a more row-major oriented algorithm. + if nb < nbmin || n <= nb { + // Unblocked code. + for j := n - 1; j >= 0; j-- { + for i := j + 1; i < n; i++ { + // Copy current column of L to work and replace with zeros. + work[i] = a[i*lda+j] + a[i*lda+j] = 0 + } + // Compute current column of inv(A). + if j < n-1 { + bi.Dgemv(blas.NoTrans, n, n-j-1, -1, a[(j+1):], lda, work[(j+1):], 1, 1, a[j:], lda) + } + } + } else { + // Blocked code. + nn := ((n - 1) / nb) * nb + for j := nn; j >= 0; j -= nb { + jb := min(nb, n-j) + // Copy current block column of L to work and replace + // with zeros. + for jj := j; jj < j+jb; jj++ { + for i := jj + 1; i < n; i++ { + work[i*ldwork+(jj-j)] = a[i*lda+jj] + a[i*lda+jj] = 0 + } + } + // Compute current block column of inv(A). + if j+jb < n { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, jb, n-j-jb, -1, a[(j+jb):], lda, work[(j+jb)*ldwork:], ldwork, 1, a[j:], lda) + } + bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, jb, 1, work[j*ldwork:], ldwork, a[j:], lda) + } + } + // Apply column interchanges. + for j := n - 2; j >= 0; j-- { + jp := ipiv[j] + if jp != j { + bi.Dswap(n, a[j:], lda, a[jp:], lda) + } + } + work[0] = float64(iws) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go new file mode 100644 index 00000000..ecc20d7c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go @@ -0,0 +1,72 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetrs solves a system of equations using an LU factorization. +// The system of equations solved is +// A * X = B if trans == blas.Trans +// A^T * X = B if trans == blas.NoTrans +// A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs. +// +// On entry b contains the elements of the matrix B. On exit, b contains the +// elements of X, the solution to the system of equations. +// +// a and ipiv contain the LU factorization of A and the permutation indices as +// computed by Dgetrf. ipiv is zero-indexed. 
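+//
+// A minimal solve sketch (a, b, n, nrhs, lda and ldb are placeholders; a holds
+// the n×n matrix A and b holds the n×nrhs right-hand sides B, both row-major):
+//
+//  var impl Implementation
+//  ipiv := make([]int, n)
+//  if !impl.Dgetrf(n, n, a, lda, ipiv) {
+//      // A is singular; the triangular solves below would divide by zero.
+//  }
+//  // Solve A * X = B in place (the blas.NoTrans branch in the body below);
+//  // on return b contains X.
+//  impl.Dgetrs(blas.NoTrans, n, nrhs, a, lda, ipiv, b, ldb)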
+func (impl Implementation) Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int) { + switch { + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + } + + // Quick return if possible. + if n == 0 || nrhs == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(b) < (n-1)*ldb+nrhs: + panic(shortB) + case len(ipiv) != n: + panic(badLenIpiv) + } + + bi := blas64.Implementation() + + if trans == blas.NoTrans { + // Solve A * X = B. + impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, 1) + // Solve L * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit, + n, nrhs, 1, a, lda, b, ldb) + // Solve U * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, + n, nrhs, 1, a, lda, b, ldb) + return + } + // Solve A^T * X = B. + // Solve U^T * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, + n, nrhs, 1, a, lda, b, ldb) + // Solve L^T * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Lower, blas.Trans, blas.Unit, + n, nrhs, 1, a, lda, b, ldb) + impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, -1) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go new file mode 100644 index 00000000..ac234dce --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go @@ -0,0 +1,242 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dggsvd3 computes the generalized singular value decomposition (GSVD) +// of an m×n matrix A and p×n matrix B: +// U^T*A*Q = D1*[ 0 R ] +// +// V^T*B*Q = D2*[ 0 R ] +// where U, V and Q are orthogonal matrices. +// +// Dggsvd3 returns k and l, the dimensions of the sub-blocks. k+l +// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. +// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and +// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following +// structures, respectively: +// +// If m-k-l >= 0, +// +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] +// +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] +// +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l +// +// where +// +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. +// +// R is stored in +// A[0:k+l, n-k-l:n] +// on exit. +// +// If m-k-l < 0, +// +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] +// +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] +// +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] +// +// where +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] +// [ 0 R22 R23 ] +// and R33 is stored in +// B[m-k:l, n+m-k-l:n] on exit. +// +// Dggsvd3 computes C, S, R, and optionally the orthogonal transformation +// matrices U, V and Q. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. 
The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// alpha and beta must have length n or Dggsvd3 will panic. On exit, alpha and +// beta contain the generalized singular value pairs of A and B +// alpha[0:k] = 1, +// beta[0:k] = 0, +// if m-k-l >= 0, +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// if m-k-l < 0, +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// if k+l < n, +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. +// +// On exit, iwork contains the permutation required to sort alpha descending. +// +// iwork must have length n, work must have length at least max(1, lwork), and +// lwork must be -1 or greater than n, otherwise Dggsvd3 will panic. If +// lwork is -1, work[0] holds the optimal lwork on return, but Dggsvd3 does +// not perform the GSVD. +func (impl Implementation) Dggsvd3(jobU, jobV, jobQ lapack.GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool) { + wantu := jobU == lapack.GSVDU + wantv := jobV == lapack.GSVDV + wantq := jobQ == lapack.GSVDQ + switch { + case !wantu && jobU != lapack.GSVDNone: + panic(badGSVDJob + "U") + case !wantv && jobV != lapack.GSVDNone: + panic(badGSVDJob + "V") + case !wantq && jobQ != lapack.GSVDNone: + panic(badGSVDJob + "Q") + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case p < 0: + panic(pLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldu < 1, wantu && ldu < m: + panic(badLdU) + case ldv < 1, wantv && ldv < p: + panic(badLdV) + case ldq < 1, wantq && ldq < n: + panic(badLdQ) + case len(iwork) < n: + panic(shortWork) + case lwork < 1 && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Determine optimal work length. + impl.Dggsvp3(jobU, jobV, jobQ, + m, p, n, + a, lda, + b, ldb, + 0, 0, + u, ldu, + v, ldv, + q, ldq, + iwork, + work, work, -1) + lwkopt := n + int(work[0]) + lwkopt = max(lwkopt, 2*n) + lwkopt = max(lwkopt, 1) + work[0] = float64(lwkopt) + if lwork == -1 { + return 0, 0, true + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(b) < (p-1)*ldb+n: + panic(shortB) + case wantu && len(u) < (m-1)*ldu+m: + panic(shortU) + case wantv && len(v) < (p-1)*ldv+p: + panic(shortV) + case wantq && len(q) < (n-1)*ldq+n: + panic(shortQ) + case len(alpha) != n: + panic(badLenAlpha) + case len(beta) != n: + panic(badLenBeta) + } + + // Compute the Frobenius norm of matrices A and B. + anorm := impl.Dlange(lapack.Frobenius, m, n, a, lda, nil) + bnorm := impl.Dlange(lapack.Frobenius, p, n, b, ldb, nil) + + // Get machine precision and set up threshold for determining + // the effective numerical rank of the matrices A and B. + tola := float64(max(m, n)) * math.Max(anorm, dlamchS) * dlamchP + tolb := float64(max(p, n)) * math.Max(bnorm, dlamchS) * dlamchP + + // Preprocessing. 
+ k, l = impl.Dggsvp3(jobU, jobV, jobQ, + m, p, n, + a, lda, + b, ldb, + tola, tolb, + u, ldu, + v, ldv, + q, ldq, + iwork, + work[:n], work[n:], lwork-n) + + // Compute the GSVD of two upper "triangular" matrices. + _, ok = impl.Dtgsja(jobU, jobV, jobQ, + m, p, n, + k, l, + a, lda, + b, ldb, + tola, tolb, + alpha, beta, + u, ldu, + v, ldv, + q, ldq, + work) + + // Sort the singular values and store the pivot indices in iwork + // Copy alpha to work, then sort alpha in work. + bi := blas64.Implementation() + bi.Dcopy(n, alpha, 1, work[:n], 1) + ibnd := min(l, m-k) + for i := 0; i < ibnd; i++ { + // Scan for largest alpha_{k+i}. + isub := i + smax := work[k+i] + for j := i + 1; j < ibnd; j++ { + if v := work[k+j]; v > smax { + isub = j + smax = v + } + } + if isub != i { + work[k+isub] = work[k+i] + work[k+i] = smax + iwork[k+i] = k + isub + } else { + iwork[k+i] = k + i + } + } + + work[0] = float64(lwkopt) + + return k, l, ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go new file mode 100644 index 00000000..7a9ad9fb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go @@ -0,0 +1,281 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dggsvp3 computes orthogonal matrices U, V and Q such that +// +// n-k-l k l +// U^T*A*Q = k [ 0 A12 A13 ] if m-k-l >= 0; +// l [ 0 0 A23 ] +// m-k-l [ 0 0 0 ] +// +// n-k-l k l +// U^T*A*Q = k [ 0 A12 A13 ] if m-k-l < 0; +// m-k [ 0 0 A23 ] +// +// n-k-l k l +// V^T*B*Q = l [ 0 0 B13 ] +// p-l [ 0 0 0 ] +// +// where the k×k matrix A12 and l×l matrix B13 are non-singular +// upper triangular. A23 is l×l upper triangular if m-k-l >= 0, +// otherwise A23 is (m-k)×l upper trapezoidal. +// +// Dggsvp3 returns k and l, the dimensions of the sub-blocks. k+l +// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz +// iteration procedure. Generally, they are the same as used in the preprocessing +// step, for example, +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps. +// Where eps is the machine epsilon. +// +// iwork must have length n, work must have length at least max(1, lwork), and +// lwork must be -1 or greater than zero, otherwise Dggsvp3 will panic. +// +// Dggsvp3 is an internal routine. It is exported for testing purposes. 
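+//
+// For example, the tolerances are typically formed from the input data as in
+// the Dggsvd3 driver above (a sketch; impl is an Implementation value, and
+// anorm, bnorm are the Frobenius norms of A and B):
+//
+//  anorm := impl.Dlange(lapack.Frobenius, m, n, a, lda, nil)
+//  bnorm := impl.Dlange(lapack.Frobenius, p, n, b, ldb, nil)
+//  tola := float64(max(m, n)) * math.Max(anorm, dlamchS) * dlamchP
+//  tolb := float64(max(p, n)) * math.Max(bnorm, dlamchS) * dlamchP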
+func (impl Implementation) Dggsvp3(jobU, jobV, jobQ lapack.GSVDJob, m, p, n int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, iwork []int, tau, work []float64, lwork int) (k, l int) { + wantu := jobU == lapack.GSVDU + wantv := jobV == lapack.GSVDV + wantq := jobQ == lapack.GSVDQ + switch { + case !wantu && jobU != lapack.GSVDNone: + panic(badGSVDJob + "U") + case !wantv && jobV != lapack.GSVDNone: + panic(badGSVDJob + "V") + case !wantq && jobQ != lapack.GSVDNone: + panic(badGSVDJob + "Q") + case m < 0: + panic(mLT0) + case p < 0: + panic(pLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldu < 1, wantu && ldu < m: + panic(badLdU) + case ldv < 1, wantv && ldv < p: + panic(badLdV) + case ldq < 1, wantq && ldq < n: + panic(badLdQ) + case len(iwork) != n: + panic(shortWork) + case lwork < 1 && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + var lwkopt int + impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, -1) + lwkopt = int(work[0]) + if wantv { + lwkopt = max(lwkopt, p) + } + lwkopt = max(lwkopt, min(n, p)) + lwkopt = max(lwkopt, m) + if wantq { + lwkopt = max(lwkopt, n) + } + impl.Dgeqp3(m, n, a, lda, iwork, tau, work, -1) + lwkopt = max(lwkopt, int(work[0])) + lwkopt = max(1, lwkopt) + if lwork == -1 { + work[0] = float64(lwkopt) + return 0, 0 + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(b) < (p-1)*ldb+n: + panic(shortB) + case wantu && len(u) < (m-1)*ldu+m: + panic(shortU) + case wantv && len(v) < (p-1)*ldv+p: + panic(shortV) + case wantq && len(q) < (n-1)*ldq+n: + panic(shortQ) + case len(tau) < n: + // tau check must come after lwkopt query since + // the Dggsvd3 call for lwkopt query may have + // lwork == -1, and tau is provided by work. + panic(shortTau) + } + + const forward = true + + // QR with column pivoting of B: B*P = V*[ S11 S12 ]. + // [ 0 0 ] + for i := range iwork[:n] { + iwork[i] = 0 + } + impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, lwork) + + // Update A := A*P. + impl.Dlapmt(forward, m, n, a, lda, iwork) + + // Determine the effective rank of matrix B. + for i := 0; i < min(p, n); i++ { + if math.Abs(b[i*ldb+i]) > tolb { + l++ + } + } + + if wantv { + // Copy the details of V, and form V. + impl.Dlaset(blas.All, p, p, 0, 0, v, ldv) + if p > 1 { + impl.Dlacpy(blas.Lower, p-1, min(p, n), b[ldb:], ldb, v[ldv:], ldv) + } + impl.Dorg2r(p, p, min(p, n), v, ldv, tau, work) + } + + // Clean up B. + for i := 1; i < l; i++ { + r := b[i*ldb : i*ldb+i] + for j := range r { + r[j] = 0 + } + } + if p > l { + impl.Dlaset(blas.All, p-l, n, 0, 0, b[l*ldb:], ldb) + } + + if wantq { + // Set Q = I and update Q := Q*P. + impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) + impl.Dlapmt(forward, n, n, q, ldq, iwork) + } + + if p >= l && n != l { + // RQ factorization of [ S11 S12 ]: [ S11 S12 ] = [ 0 S12 ]*Z. + impl.Dgerq2(l, n, b, ldb, tau, work) + + // Update A := A*Z^T. + impl.Dormr2(blas.Right, blas.Trans, m, n, l, b, ldb, tau, a, lda, work) + + if wantq { + // Update Q := Q*Z^T. + impl.Dormr2(blas.Right, blas.Trans, n, n, l, b, ldb, tau, q, ldq, work) + } + + // Clean up B. + impl.Dlaset(blas.All, l, n-l, 0, 0, b, ldb) + for i := 1; i < l; i++ { + r := b[i*ldb+n-l : i*ldb+i+n-l] + for j := range r { + r[j] = 0 + } + } + } + + // Let N-L L + // A = [ A11 A12 ] M, + // + // then the following does the complete QR decomposition of A11: + // + // A11 = U*[ 0 T12 ]*P1^T. 
+ // [ 0 0 ] + for i := range iwork[:n-l] { + iwork[i] = 0 + } + impl.Dgeqp3(m, n-l, a, lda, iwork[:n-l], tau, work, lwork) + + // Determine the effective rank of A11. + for i := 0; i < min(m, n-l); i++ { + if math.Abs(a[i*lda+i]) > tola { + k++ + } + } + + // Update A12 := U^T*A12, where A12 = A[0:m, n-l:n]. + impl.Dorm2r(blas.Left, blas.Trans, m, l, min(m, n-l), a, lda, tau, a[n-l:], lda, work) + + if wantu { + // Copy the details of U, and form U. + impl.Dlaset(blas.All, m, m, 0, 0, u, ldu) + if m > 1 { + impl.Dlacpy(blas.Lower, m-1, min(m, n-l), a[lda:], lda, u[ldu:], ldu) + } + impl.Dorg2r(m, m, min(m, n-l), u, ldu, tau, work) + } + + if wantq { + // Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*P1. + impl.Dlapmt(forward, n, n-l, q, ldq, iwork[:n-l]) + } + + // Clean up A: set the strictly lower triangular part of + // A[0:k, 0:k] = 0, and A[k:m, 0:n-l] = 0. + for i := 1; i < k; i++ { + r := a[i*lda : i*lda+i] + for j := range r { + r[j] = 0 + } + } + if m > k { + impl.Dlaset(blas.All, m-k, n-l, 0, 0, a[k*lda:], lda) + } + + if n-l > k { + // RQ factorization of [ T11 T12 ] = [ 0 T12 ]*Z1. + impl.Dgerq2(k, n-l, a, lda, tau, work) + + if wantq { + // Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*Z1^T. + impl.Dorm2r(blas.Right, blas.Trans, n, n-l, k, a, lda, tau, q, ldq, work) + } + + // Clean up A. + impl.Dlaset(blas.All, k, n-l-k, 0, 0, a, lda) + for i := 1; i < k; i++ { + r := a[i*lda+n-k-l : i*lda+i+n-k-l] + for j := range r { + a[j] = 0 + } + } + } + + if m > k { + // QR factorization of A[k:m, n-l:n]. + impl.Dgeqr2(m-k, l, a[k*lda+n-l:], lda, tau, work) + if wantu { + // Update U[:, k:m) := U[:, k:m]*U1. + impl.Dorm2r(blas.Right, blas.NoTrans, m, m-k, min(m-k, l), a[k*lda+n-l:], lda, tau, u[k:], ldu, work) + } + + // Clean up A. + for i := k + 1; i < m; i++ { + r := a[i*lda+n-l : i*lda+min(n-l+i-k, n)] + for j := range r { + r[j] = 0 + } + } + } + + work[0] = float64(lwkopt) + return k, l +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go new file mode 100644 index 00000000..ed3fbca8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go @@ -0,0 +1,252 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dhseqr computes the eigenvalues of an n×n Hessenberg matrix H and, +// optionally, the matrices T and Z from the Schur decomposition +// H = Z T Z^T, +// where T is an n×n upper quasi-triangular matrix (the Schur form), and Z is +// the n×n orthogonal matrix of Schur vectors. +// +// Optionally Z may be postmultiplied into an input orthogonal matrix Q so that +// this routine can give the Schur factorization of a matrix A which has been +// reduced to the Hessenberg form H by the orthogonal matrix Q: +// A = Q H Q^T = (QZ) T (QZ)^T. +// +// If job == lapack.EigenvaluesOnly, only the eigenvalues will be computed. +// If job == lapack.EigenvaluesAndSchur, the eigenvalues and the Schur form T will +// be computed. +// For other values of job Dhseqr will panic. +// +// If compz == lapack.SchurNone, no Schur vectors will be computed and Z will not be +// referenced. +// If compz == lapack.SchurHess, on return Z will contain the matrix of Schur +// vectors of H. 
+// If compz == lapack.SchurOrig, on entry z is assumed to contain the orthogonal +// matrix Q that is the identity except for the submatrix +// Q[ilo:ihi+1,ilo:ihi+1]. On return z will be updated to the product Q*Z. +// +// ilo and ihi determine the block of H on which Dhseqr operates. It is assumed +// that H is already upper triangular in rows and columns [0:ilo] and [ihi+1:n], +// although it will be only checked that the block is isolated, that is, +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// and Dhseqr will panic otherwise. ilo and ihi are typically set by a previous +// call to Dgebal, otherwise they should be set to 0 and n-1, respectively. It +// must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0. +// +// wr and wi must have length n. +// +// work must have length at least lwork and lwork must be at least max(1,n) +// otherwise Dhseqr will panic. The minimum lwork delivers very good and +// sometimes optimal performance, although lwork as large as 11*n may be +// required. On return, work[0] will contain the optimal value of lwork. +// +// If lwork is -1, instead of performing Dhseqr, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// unconverged indicates whether Dhseqr computed all the eigenvalues. +// +// If unconverged == 0, all the eigenvalues have been computed and their real +// and imaginary parts will be stored on return in wr and wi, respectively. If +// two eigenvalues are computed as a complex conjugate pair, they are stored in +// consecutive elements of wr and wi, say the i-th and (i+1)th, with wi[i] > 0 +// and wi[i+1] < 0. +// +// If unconverged == 0 and job == lapack.EigenvaluesAndSchur, on return H will +// contain the upper quasi-triangular matrix T from the Schur decomposition (the +// Schur form). 2×2 diagonal blocks (corresponding to complex conjugate pairs of +// eigenvalues) will be returned in standard form, with +// H[i,i] == H[i+1,i+1], +// and +// H[i+1,i]*H[i,i+1] < 0. +// The eigenvalues will be stored in wr and wi in the same order as on the +// diagonal of the Schur form returned in H, with +// wr[i] = H[i,i], +// and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, +// wi[i] = sqrt(-H[i+1,i]*H[i,i+1]), +// wi[i+1] = -wi[i]. +// +// If unconverged == 0 and job == lapack.EigenvaluesOnly, the contents of h +// on return is unspecified. +// +// If unconverged > 0, some eigenvalues have not converged, and the blocks +// [0:ilo] and [unconverged:n] of wr and wi will contain those eigenvalues which +// have been successfully computed. Failures are rare. +// +// If unconverged > 0 and job == lapack.EigenvaluesOnly, on return the +// remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg +// matrix H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged > 0 and job == lapack.EigenvaluesAndSchur, then on +// return +// (initial H) U = U (final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged > 0 and compz == lapack.SchurOrig, then on return +// (final Z) = (initial Z) U, +// where U is the orthogonal matrix in (*) regardless of the value of job. +// +// If unconverged > 0 and compz == lapack.SchurHess, then on return +// (final Z) = U, +// where U is the orthogonal matrix in (*) regardless of the value of job. +// +// References: +// [1] R. Byers. 
LAPACK 3.1 xHSEQR: Tuning and Implementation Notes on the +// Small Bulge Multi-Shift QR Algorithm with Aggressive Early Deflation. +// LAPACK Working Note 187 (2007) +// URL: http://www.netlib.org/lapack/lawnspdf/lawn187.pdf +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [3] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// Dhseqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dhseqr(job lapack.SchurJob, compz lapack.SchurComp, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, z []float64, ldz int, work []float64, lwork int) (unconverged int) { + wantt := job == lapack.EigenvaluesAndSchur + wantz := compz == lapack.SchurHess || compz == lapack.SchurOrig + + switch { + case job != lapack.EigenvaluesOnly && job != lapack.EigenvaluesAndSchur: + panic(badSchurJob) + case compz != lapack.SchurNone && compz != lapack.SchurHess && compz != lapack.SchurOrig: + panic(badSchurComp) + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case ldh < max(1, n): + panic(badLdH) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return 0 + } + + // Quick return in case of a workspace query. + if lwork == -1 { + impl.Dlaqr04(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, ilo, ihi, z, ldz, work, -1, 1) + work[0] = math.Max(float64(n), work[0]) + return 0 + } + + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case len(wr) < n: + panic(shortWr) + case len(wi) < n: + panic(shortWi) + } + + const ( + // Matrices of order ntiny or smaller must be processed by + // Dlahqr because of insufficient subdiagonal scratch space. + // This is a hard limit. + ntiny = 11 + + // nl is the size of a local workspace to help small matrices + // through a rare Dlahqr failure. nl > ntiny is required and + // nl <= nmin = Ilaenv(ispec=12,...) is recommended (the default + // value of nmin is 75). Using nl = 49 allows up to six + // simultaneous shifts and a 16×16 deflation window. + nl = 49 + ) + + // Copy eigenvalues isolated by Dgebal. + for i := 0; i < ilo; i++ { + wr[i] = h[i*ldh+i] + wi[i] = 0 + } + for i := ihi + 1; i < n; i++ { + wr[i] = h[i*ldh+i] + wi[i] = 0 + } + + // Initialize Z to identity matrix if requested. + if compz == lapack.SchurHess { + impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) + } + + // Quick return if possible. + if ilo == ihi { + wr[ilo] = h[ilo*ldh+ilo] + wi[ilo] = 0 + return 0 + } + + // Dlahqr/Dlaqr04 crossover point. + nmin := impl.Ilaenv(12, "DHSEQR", string(job)+string(compz), n, ilo, ihi, lwork) + nmin = max(ntiny, nmin) + + if n > nmin { + // Dlaqr0 for big matrices. + unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1], + ilo, ihi, z, ldz, work, lwork, 1) + } else { + // Dlahqr for small matrices. 
+ unconverged = impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1], + ilo, ihi, z, ldz) + if unconverged > 0 { + // A rare Dlahqr failure! Dlaqr04 sometimes succeeds + // when Dlahqr fails. + kbot := unconverged + if n >= nl { + // Larger matrices have enough subdiagonal + // scratch space to call Dlaqr04 directly. + unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, kbot, h, ldh, + wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, work, lwork, 1) + } else { + // Tiny matrices don't have enough subdiagonal + // scratch space to benefit from Dlaqr04. Hence, + // tiny matrices must be copied into a larger + // array before calling Dlaqr04. + var hl [nl * nl]float64 + impl.Dlacpy(blas.All, n, n, h, ldh, hl[:], nl) + impl.Dlaset(blas.All, nl, nl-n, 0, 0, hl[n:], nl) + var workl [nl]float64 + unconverged = impl.Dlaqr04(wantt, wantz, nl, ilo, kbot, hl[:], nl, + wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, workl[:], nl, 1) + work[0] = workl[0] + if wantt || unconverged > 0 { + impl.Dlacpy(blas.All, n, n, hl[:], nl, h, ldh) + } + } + } + } + // Zero out under the first subdiagonal, if necessary. + if (wantt || unconverged > 0) && n > 2 { + impl.Dlaset(blas.Lower, n-2, n-2, 0, 0, h[2*ldh:], ldh) + } + + work[0] = math.Max(float64(n), work[0]) + return unconverged +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go new file mode 100644 index 00000000..babc0b7c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go @@ -0,0 +1,173 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlabrd reduces the first NB rows and columns of a real general m×n matrix +// A to upper or lower bidiagonal form by an orthogonal transformation +// Q**T * A * P +// If m >= n, A is reduced to upper bidiagonal form and upon exit the elements +// on and below the diagonal in the first nb columns represent the elementary +// reflectors, and the elements above the diagonal in the first nb rows represent +// the matrix P. If m < n, A is reduced to lower bidiagonal form and the elements +// P is instead stored above the diagonal. +// +// The reduction to bidiagonal form is stored in d and e, where d are the diagonal +// elements, and e are the off-diagonal elements. +// +// The matrices Q and P are products of elementary reflectors +// Q = H_0 * H_1 * ... * H_{nb-1} +// P = G_0 * G_1 * ... * G_{nb-1} +// where +// H_i = I - tauQ[i] * v_i * v_i^T +// G_i = I - tauP[i] * u_i * u_i^T +// +// As an example, on exit the entries of A when m = 6, n = 5, and nb = 2 +// [ 1 1 u1 u1 u1] +// [v1 1 1 u2 u2] +// [v1 v2 a a a] +// [v1 v2 a a a] +// [v1 v2 a a a] +// [v1 v2 a a a] +// and when m = 5, n = 6, and nb = 2 +// [ 1 u1 u1 u1 u1 u1] +// [ 1 1 u2 u2 u2 u2] +// [v1 1 a a a a] +// [v1 v2 a a a a] +// [v1 v2 a a a a] +// +// Dlabrd also returns the matrices X and Y which are used with U and V to +// apply the transformation to the unreduced part of the matrix +// A := A - V*Y^T - X*U^T +// and returns the matrices X and Y which are needed to apply the +// transformation to the unreduced part of A. +// +// X is an m×nb matrix, Y is an n×nb matrix. d, e, taup, and tauq must all have +// length at least nb. Dlabrd will panic if these size constraints are violated. +// +// Dlabrd is an internal routine. It is exported for testing purposes. 
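//
// A minimal illustrative call (an editorial sketch with arbitrary sizes, not
// taken from the library documentation) looks like
//  impl := Implementation{}
//  const m, n, nb = 6, 5, 2
//  a := make([]float64, m*n)   // row-major general m×n matrix, lda = n
//  d := make([]float64, nb)
//  e := make([]float64, nb)
//  tauQ := make([]float64, nb)
//  tauP := make([]float64, nb)
//  x := make([]float64, m*nb)  // m×nb workspace X, ldx = nb
//  y := make([]float64, n*nb)  // n×nb workspace Y, ldy = nb
//  impl.Dlabrd(m, n, nb, a, n, d, e, tauQ, tauP, x, nb, y, nb)
// after which d[:nb] and e[:nb] hold the leading diagonal and off-diagonal
// entries of the bidiagonal matrix, and the reflectors are stored in a as
// described above.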
+func (impl Implementation) Dlabrd(m, n, nb int, a []float64, lda int, d, e, tauQ, tauP, x []float64, ldx int, y []float64, ldy int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case nb > m: + panic(nbGTM) + case lda < max(1, n): + panic(badLdA) + case ldx < max(1, nb): + panic(badLdX) + case ldy < max(1, nb): + panic(badLdY) + } + + if m == 0 || n == 0 || nb == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(d) < nb: + panic(shortD) + case len(e) < nb: + panic(shortE) + case len(tauQ) < nb: + panic(shortTauQ) + case len(tauP) < nb: + panic(shortTauP) + case len(x) < (m-1)*ldx+nb: + panic(shortX) + case len(y) < (n-1)*ldy+nb: + panic(shortY) + } + + bi := blas64.Implementation() + + if m >= n { + // Reduce to upper bidiagonal form. + for i := 0; i < nb; i++ { + bi.Dgemv(blas.NoTrans, m-i, i, -1, a[i*lda:], lda, y[i*ldy:], 1, 1, a[i*lda+i:], lda) + bi.Dgemv(blas.NoTrans, m-i, i, -1, x[i*ldx:], ldx, a[i:], lda, 1, a[i*lda+i:], lda) + + a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) + d[i] = a[i*lda+i] + if i < n-1 { + // Compute Y[i+1:n, i]. + a[i*lda+i] = 1 + bi.Dgemv(blas.Trans, m-i, n-i-1, 1, a[i*lda+i+1:], lda, a[i*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i, i, 1, a[i*lda:], lda, a[i*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i, i, 1, x[i*ldx:], ldx, a[i*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) + + // Update A[i, i+1:n]. + bi.Dgemv(blas.NoTrans, n-i-1, i+1, -1, y[(i+1)*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i+1:], 1) + bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, x[i*ldx:], 1, 1, a[i*lda+i+1:], 1) + + // Generate reflection P[i] to annihilate A[i, i+2:n]. + a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) + e[i] = a[i*lda+i+1] + a[i*lda+i+1] = 1 + + // Compute X[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[i*lda+i+1:], 1, 0, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.Trans, n-i-1, i+1, 1, y[(i+1)*ldy:], ldy, a[i*lda+i+1:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.NoTrans, i, n-i-1, 1, a[i+1:], lda, a[i*lda+i+1:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) + } + } + return + } + // Reduce to lower bidiagonal form. + for i := 0; i < nb; i++ { + // Update A[i,i:n] + bi.Dgemv(blas.NoTrans, n-i, i, -1, y[i*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i:], 1) + bi.Dgemv(blas.Trans, i, n-i, -1, a[i:], lda, x[i*ldx:], 1, 1, a[i*lda+i:], 1) + + // Generate reflection P[i] to annihilate A[i, i+1:n] + a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + d[i] = a[i*lda+i] + if i < m-1 { + a[i*lda+i] = 1 + // Compute X[i+1:m, i]. 
+ bi.Dgemv(blas.NoTrans, m-i-1, n-i, 1, a[(i+1)*lda+i:], lda, a[i*lda+i:], 1, 0, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.Trans, n-i, i, 1, y[i*ldy:], ldy, a[i*lda+i:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.NoTrans, i, n-i, 1, a[i:], lda, a[i*lda+i:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) + + // Update A[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, y[i*ldy:], 1, 1, a[(i+1)*lda+i:], lda) + bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, x[(i+1)*ldx:], ldx, a[i:], lda, 1, a[(i+1)*lda+i:], lda) + + // Generate reflection Q[i] to annihilate A[i+2:m, i]. + a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + a[(i+1)*lda+i] = 1 + + // Compute Y[i+1:n, i]. + bi.Dgemv(blas.Trans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i-1, i, 1, a[(i+1)*lda:], lda, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i-1, i+1, 1, x[(i+1)*ldx:], ldx, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.Trans, i+1, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go new file mode 100644 index 00000000..e8ac1e43 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlacn2 estimates the 1-norm of an n×n matrix A using sequential updates with +// matrix-vector products provided externally. +// +// Dlacn2 is called sequentially and it returns the value of est and kase to be +// used on the next call. +// On the initial call, kase must be 0. +// In between calls, x must be overwritten by +// A * X if kase was returned as 1, +// A^T * X if kase was returned as 2, +// and all other parameters must not be changed. +// On the final return, kase is returned as 0, v contains A*W where W is a +// vector, and est = norm(V)/norm(W) is a lower bound for 1-norm of A. +// +// v, x, and isgn must all have length n and n must be at least 1, otherwise +// Dlacn2 will panic. isave is used for temporary storage. +// +// Dlacn2 is an internal routine. It is exported for testing purposes. 
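//
// The reverse-communication pattern described above is typically driven by a
// small loop. A rough sketch (editorial; aMulVec and aTransMulVec stand for
// caller-supplied products with A and A^T and are not part of this package):
//  impl := Implementation{}
//  n := 10
//  v := make([]float64, n)
//  x := make([]float64, n)
//  isgn := make([]int, n)
//  var isave [3]int
//  est, kase := impl.Dlacn2(n, v, x, isgn, 0, 0, &isave)
//  for kase != 0 {
//  	switch kase {
//  	case 1:
//  		aMulVec(x)      // overwrite x with A*x
//  	case 2:
//  		aTransMulVec(x) // overwrite x with A^T*x
//  	}
//  	est, kase = impl.Dlacn2(n, v, x, isgn, est, kase, &isave)
//  }
//  // est is now a lower bound estimate of the 1-norm of A.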
+func (impl Implementation) Dlacn2(n int, v, x []float64, isgn []int, est float64, kase int, isave *[3]int) (float64, int) { + switch { + case n < 1: + panic(nLT1) + case len(v) < n: + panic(shortV) + case len(x) < n: + panic(shortX) + case len(isgn) < n: + panic(shortIsgn) + case isave[0] < 0 || 5 < isave[0]: + panic(badIsave) + case isave[0] == 0 && kase != 0: + panic(badIsave) + } + + const itmax = 5 + bi := blas64.Implementation() + + if kase == 0 { + for i := 0; i < n; i++ { + x[i] = 1 / float64(n) + } + kase = 1 + isave[0] = 1 + return est, kase + } + switch isave[0] { + case 1: + if n == 1 { + v[0] = x[0] + est = math.Abs(v[0]) + kase = 0 + return est, kase + } + est = bi.Dasum(n, x, 1) + for i := 0; i < n; i++ { + x[i] = math.Copysign(1, x[i]) + isgn[i] = int(x[i]) + } + kase = 2 + isave[0] = 2 + return est, kase + case 2: + isave[1] = bi.Idamax(n, x, 1) + isave[2] = 2 + for i := 0; i < n; i++ { + x[i] = 0 + } + x[isave[1]] = 1 + kase = 1 + isave[0] = 3 + return est, kase + case 3: + bi.Dcopy(n, x, 1, v, 1) + estold := est + est = bi.Dasum(n, v, 1) + sameSigns := true + for i := 0; i < n; i++ { + if int(math.Copysign(1, x[i])) != isgn[i] { + sameSigns = false + break + } + } + if !sameSigns && est > estold { + for i := 0; i < n; i++ { + x[i] = math.Copysign(1, x[i]) + isgn[i] = int(x[i]) + } + kase = 2 + isave[0] = 4 + return est, kase + } + case 4: + jlast := isave[1] + isave[1] = bi.Idamax(n, x, 1) + if x[jlast] != math.Abs(x[isave[1]]) && isave[2] < itmax { + isave[2] += 1 + for i := 0; i < n; i++ { + x[i] = 0 + } + x[isave[1]] = 1 + kase = 1 + isave[0] = 3 + return est, kase + } + case 5: + tmp := 2 * (bi.Dasum(n, x, 1)) / float64(3*n) + if tmp > est { + bi.Dcopy(n, x, 1, v, 1) + est = tmp + } + kase = 0 + return est, kase + } + // Iteration complete. Final stage + altsgn := 1.0 + for i := 0; i < n; i++ { + x[i] = altsgn * (1 + float64(i)/float64(n-1)) + altsgn *= -1 + } + kase = 1 + isave[0] = 5 + return est, kase +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go new file mode 100644 index 00000000..a37f3b0d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go @@ -0,0 +1,59 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlacpy copies the elements of A specified by uplo into B. Uplo can specify +// a triangular portion with blas.Upper or blas.Lower, or can specify all of the +// elemest with blas.All. +// +// Dlacpy is an internal routine. It is exported for testing purposes. 
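//
// For example (an editorial sketch), copying only the upper triangle of a
// row-major n×n matrix a into b, with both leading dimensions equal to n:
//  impl.Dlacpy(blas.Upper, n, n, a, n, b, n)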
+func (impl Implementation) Dlacpy(uplo blas.Uplo, m, n int, a []float64, lda int, b []float64, ldb int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower && uplo != blas.All: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + if m == 0 || n == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(b) < (m-1)*ldb+n: + panic(shortB) + } + + switch uplo { + case blas.Upper: + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + case blas.Lower: + for i := 0; i < m; i++ { + for j := 0; j < min(i+1, n); j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + case blas.All: + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go new file mode 100644 index 00000000..c071fec7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go @@ -0,0 +1,49 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlae2 computes the eigenvalues of a 2×2 symmetric matrix +// [a b] +// [b c] +// and returns the eigenvalue with the larger absolute value as rt1 and the +// smaller as rt2. +// +// Dlae2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlae2(a, b, c float64) (rt1, rt2 float64) { + sm := a + c + df := a - c + adf := math.Abs(df) + tb := b + b + ab := math.Abs(tb) + acmx := c + acmn := a + if math.Abs(a) > math.Abs(c) { + acmx = a + acmn = c + } + var rt float64 + if adf > ab { + rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) + } else if adf < ab { + rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) + } else { + rt = ab * math.Sqrt2 + } + if sm < 0 { + rt1 = 0.5 * (sm - rt) + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + return rt1, rt2 + } + if sm > 0 { + rt1 = 0.5 * (sm + rt) + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + return rt1, rt2 + } + rt1 = 0.5 * rt + rt2 = -0.5 * rt + return rt1, rt2 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go new file mode 100644 index 00000000..74d75b91 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go @@ -0,0 +1,82 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaev2 computes the Eigen decomposition of a symmetric 2×2 matrix. +// The matrix is given by +// [a b] +// [b c] +// Dlaev2 returns rt1 and rt2, the eigenvalues of the matrix where |RT1| > |RT2|, +// and [cs1, sn1] which is the unit right eigenvalue for RT1. +// [ cs1 sn1] [a b] [cs1 -sn1] = [rt1 0] +// [-sn1 cs1] [b c] [sn1 cs1] [ 0 rt2] +// +// Dlaev2 is an internal routine. It is exported for testing purposes. 
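//
// For example (an editorial sketch), for the matrix
//  [2 1]
//  [1 2]
// the call
//  rt1, rt2, cs1, sn1 := impl.Dlaev2(2, 1, 2)
// returns rt1 = 3, rt2 = 1 and cs1 = sn1 = 1/√2, since (1, 1)/√2 is a unit
// eigenvector for the eigenvalue 3.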
+func (impl Implementation) Dlaev2(a, b, c float64) (rt1, rt2, cs1, sn1 float64) { + sm := a + c + df := a - c + adf := math.Abs(df) + tb := b + b + ab := math.Abs(tb) + acmx := c + acmn := a + if math.Abs(a) > math.Abs(c) { + acmx = a + acmn = c + } + var rt float64 + if adf > ab { + rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) + } else if adf < ab { + rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) + } else { + rt = ab * math.Sqrt(2) + } + var sgn1 float64 + if sm < 0 { + rt1 = 0.5 * (sm - rt) + sgn1 = -1 + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + } else if sm > 0 { + rt1 = 0.5 * (sm + rt) + sgn1 = 1 + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + } else { + rt1 = 0.5 * rt + rt2 = -0.5 * rt + sgn1 = 1 + } + var cs, sgn2 float64 + if df >= 0 { + cs = df + rt + sgn2 = 1 + } else { + cs = df - rt + sgn2 = -1 + } + acs := math.Abs(cs) + if acs > ab { + ct := -tb / cs + sn1 = 1 / math.Sqrt(1+ct*ct) + cs1 = ct * sn1 + } else { + if ab == 0 { + cs1 = 1 + sn1 = 0 + } else { + tn := -cs / tb + cs1 = 1 / math.Sqrt(1+tn*tn) + sn1 = tn * cs1 + } + } + if sgn1 == sgn2 { + tn := cs1 + cs1 = -sn1 + sn1 = tn + } + return rt1, rt2, cs1, sn1 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go new file mode 100644 index 00000000..2b79bd8a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go @@ -0,0 +1,269 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlaexc swaps two adjacent diagonal blocks of order 1 or 2 in an n×n upper +// quasi-triangular matrix T by an orthogonal similarity transformation. +// +// T must be in Schur canonical form, that is, block upper triangular with 1×1 +// and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal elements +// equal and its off-diagonal elements of opposite sign. On return, T will +// contain the updated matrix again in Schur canonical form. +// +// If wantq is true, the transformation is accumulated in the n×n matrix Q, +// otherwise Q is not referenced. +// +// j1 is the index of the first row of the first block. n1 and n2 are the order +// of the first and second block, respectively. +// +// work must have length at least n, otherwise Dlaexc will panic. +// +// If ok is false, the transformed matrix T would be too far from Schur form. +// The blocks are not swapped, and T and Q are not modified. +// +// If n1 and n2 are both equal to 1, Dlaexc will always return true. +// +// Dlaexc is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaexc(wantq bool, n int, t []float64, ldt int, q []float64, ldq int, j1, n1, n2 int, work []float64) (ok bool) { + switch { + case n < 0: + panic(nLT0) + case ldt < max(1, n): + panic(badLdT) + case wantq && ldt < max(1, n): + panic(badLdQ) + case j1 < 0 || n <= j1: + panic(badJ1) + case len(work) < n: + panic(shortWork) + case n1 < 0 || 2 < n1: + panic(badN1) + case n2 < 0 || 2 < n2: + panic(badN2) + } + + if n == 0 || n1 == 0 || n2 == 0 { + return true + } + + switch { + case len(t) < (n-1)*ldt+n: + panic(shortT) + case wantq && len(q) < (n-1)*ldq+n: + panic(shortQ) + } + + if j1+n1 >= n { + // TODO(vladimir-ch): Reference LAPACK does this check whether + // the start of the second block is in the matrix T. 
It returns + // true if it is not and moreover it does not check whether the + // whole second block fits into T. This does not feel + // satisfactory. The only caller of Dlaexc is Dtrexc, so if the + // caller makes sure that this does not happen, we could be + // stricter here. + return true + } + + j2 := j1 + 1 + j3 := j1 + 2 + + bi := blas64.Implementation() + + if n1 == 1 && n2 == 1 { + // Swap two 1×1 blocks. + t11 := t[j1*ldt+j1] + t22 := t[j2*ldt+j2] + + // Determine the transformation to perform the interchange. + cs, sn, _ := impl.Dlartg(t[j1*ldt+j2], t22-t11) + + // Apply transformation to the matrix T. + if n-j3 > 0 { + bi.Drot(n-j3, t[j1*ldt+j3:], 1, t[j2*ldt+j3:], 1, cs, sn) + } + if j1 > 0 { + bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) + } + + t[j1*ldt+j1] = t22 + t[j2*ldt+j2] = t11 + + if wantq { + // Accumulate transformation in the matrix Q. + bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) + } + + return true + } + + // Swapping involves at least one 2×2 block. + // + // Copy the diagonal block of order n1+n2 to the local array d and + // compute its norm. + nd := n1 + n2 + var d [16]float64 + const ldd = 4 + impl.Dlacpy(blas.All, nd, nd, t[j1*ldt+j1:], ldt, d[:], ldd) + dnorm := impl.Dlange(lapack.MaxAbs, nd, nd, d[:], ldd, work) + + // Compute machine-dependent threshold for test for accepting swap. + eps := dlamchP + thresh := math.Max(10*eps*dnorm, dlamchS/eps) + + // Solve T11*X - X*T22 = scale*T12 for X. + var x [4]float64 + const ldx = 2 + scale, _, _ := impl.Dlasy2(false, false, -1, n1, n2, d[:], ldd, d[n1*ldd+n1:], ldd, d[n1:], ldd, x[:], ldx) + + // Swap the adjacent diagonal blocks. + switch { + case n1 == 1 && n2 == 2: + // Generate elementary reflector H so that + // ( scale, X11, X12 ) H = ( 0, 0, * ) + u := [3]float64{scale, x[0], 1} + _, tau := impl.Dlarfg(3, x[1], u[:2], 1) + t11 := t[j1*ldt+j1] + + // Perform swap provisionally on diagonal block in d. + impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) + impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) + + // Test whether to reject swap. + if math.Max(math.Abs(d[2*ldd]), math.Max(math.Abs(d[2*ldd+1]), math.Abs(d[2*ldd+2]-t11))) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + impl.Dlarfx(blas.Left, 3, n-j1, u[:], tau, t[j1*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j2+1, 3, u[:], tau, t[j1:], ldt, work) + + t[j3*ldt+j1] = 0 + t[j3*ldt+j2] = 0 + t[j3*ldt+j3] = t11 + + if wantq { + // Accumulate transformation in the matrix Q. + impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) + } + + case n1 == 2 && n2 == 1: + // Generate elementary reflector H so that: + // H ( -X11 ) = ( * ) + // ( -X21 ) = ( 0 ) + // ( scale ) = ( 0 ) + u := [3]float64{1, -x[ldx], scale} + _, tau := impl.Dlarfg(3, -x[0], u[1:], 1) + t33 := t[j3*ldt+j3] + + // Perform swap provisionally on diagonal block in D. + impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) + impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) + + // Test whether to reject swap. + if math.Max(math.Abs(d[ldd]), math.Max(math.Abs(d[2*ldd]), math.Abs(d[0]-t33))) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + impl.Dlarfx(blas.Right, j3+1, 3, u[:], tau, t[j1:], ldt, work) + impl.Dlarfx(blas.Left, 3, n-j1-1, u[:], tau, t[j1*ldt+j2:], ldt, work) + + t[j1*ldt+j1] = t33 + t[j2*ldt+j1] = 0 + t[j3*ldt+j1] = 0 + + if wantq { + // Accumulate transformation in the matrix Q. 
+ impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) + } + + default: // n1 == 2 && n2 == 2 + // Generate elementary reflectors H_1 and H_2 so that: + // H_2 H_1 ( -X11 -X12 ) = ( * * ) + // ( -X21 -X22 ) ( 0 * ) + // ( scale 0 ) ( 0 0 ) + // ( 0 scale ) ( 0 0 ) + u1 := [3]float64{1, -x[ldx], scale} + _, tau1 := impl.Dlarfg(3, -x[0], u1[1:], 1) + + temp := -tau1 * (x[1] + u1[1]*x[ldx+1]) + u2 := [3]float64{1, -temp * u1[2], scale} + _, tau2 := impl.Dlarfg(3, -temp*u1[1]-x[ldx+1], u2[1:], 1) + + // Perform swap provisionally on diagonal block in D. + impl.Dlarfx(blas.Left, 3, 4, u1[:], tau1, d[:], ldd, work) + impl.Dlarfx(blas.Right, 4, 3, u1[:], tau1, d[:], ldd, work) + impl.Dlarfx(blas.Left, 3, 4, u2[:], tau2, d[ldd:], ldd, work) + impl.Dlarfx(blas.Right, 4, 3, u2[:], tau2, d[1:], ldd, work) + + // Test whether to reject swap. + m1 := math.Max(math.Abs(d[2*ldd]), math.Abs(d[2*ldd+1])) + m2 := math.Max(math.Abs(d[3*ldd]), math.Abs(d[3*ldd+1])) + if math.Max(m1, m2) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + j4 := j1 + 3 + impl.Dlarfx(blas.Left, 3, n-j1, u1[:], tau1, t[j1*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j4+1, 3, u1[:], tau1, t[j1:], ldt, work) + impl.Dlarfx(blas.Left, 3, n-j1, u2[:], tau2, t[j2*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j4+1, 3, u2[:], tau2, t[j2:], ldt, work) + + t[j3*ldt+j1] = 0 + t[j3*ldt+j2] = 0 + t[j4*ldt+j1] = 0 + t[j4*ldt+j2] = 0 + + if wantq { + // Accumulate transformation in the matrix Q. + impl.Dlarfx(blas.Right, n, 3, u1[:], tau1, q[j1:], ldq, work) + impl.Dlarfx(blas.Right, n, 3, u2[:], tau2, q[j2:], ldq, work) + } + } + + if n2 == 2 { + // Standardize new 2×2 block T11. + a, b := t[j1*ldt+j1], t[j1*ldt+j2] + c, d := t[j2*ldt+j1], t[j2*ldt+j2] + var cs, sn float64 + t[j1*ldt+j1], t[j1*ldt+j2], t[j2*ldt+j1], t[j2*ldt+j2], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) + if n-j1-2 > 0 { + bi.Drot(n-j1-2, t[j1*ldt+j1+2:], 1, t[j2*ldt+j1+2:], 1, cs, sn) + } + if j1 > 0 { + bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) + } + if wantq { + bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) + } + } + if n1 == 2 { + // Standardize new 2×2 block T22. + j3 := j1 + n2 + j4 := j3 + 1 + a, b := t[j3*ldt+j3], t[j3*ldt+j4] + c, d := t[j4*ldt+j3], t[j4*ldt+j4] + var cs, sn float64 + t[j3*ldt+j3], t[j3*ldt+j4], t[j4*ldt+j3], t[j4*ldt+j4], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) + if n-j3-2 > 0 { + bi.Drot(n-j3-2, t[j3*ldt+j3+2:], 1, t[j4*ldt+j3+2:], 1, cs, sn) + } + bi.Drot(j3, t[j3:], ldt, t[j4:], ldt, cs, sn) + if wantq { + bi.Drot(n, q[j3:], ldq, q[j4:], ldq, cs, sn) + } + } + + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go new file mode 100644 index 00000000..6954deb4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go @@ -0,0 +1,182 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlags2 computes 2-by-2 orthogonal matrices U, V and Q with the +// triangles of A and B specified by upper. +// +// If upper is true +// +// U^T*A*Q = U^T*[ a1 a2 ]*Q = [ x 0 ] +// [ 0 a3 ] [ x x ] +// and +// V^T*B*Q = V^T*[ b1 b2 ]*Q = [ x 0 ] +// [ 0 b3 ] [ x x ] +// +// otherwise +// +// U^T*A*Q = U^T*[ a1 0 ]*Q = [ x x ] +// [ a2 a3 ] [ 0 x ] +// and +// V^T*B*Q = V^T*[ b1 0 ]*Q = [ x x ] +// [ b2 b3 ] [ 0 x ]. 
+// +// The rows of the transformed A and B are parallel, where +// +// U = [ csu snu ], V = [ csv snv ], Q = [ csq snq ] +// [ -snu csu ] [ -snv csv ] [ -snq csq ] +// +// Dlags2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlags2(upper bool, a1, a2, a3, b1, b2, b3 float64) (csu, snu, csv, snv, csq, snq float64) { + if upper { + // Input matrices A and B are upper triangular matrices. + // + // Form matrix C = A*adj(B) = [ a b ] + // [ 0 d ] + a := a1 * b3 + d := a3 * b1 + b := a2*b1 - a1*b2 + + // The SVD of real 2-by-2 triangular C. + // + // [ csl -snl ]*[ a b ]*[ csr snr ] = [ r 0 ] + // [ snl csl ] [ 0 d ] [ -snr csr ] [ 0 t ] + _, _, snr, csr, snl, csl := impl.Dlasv2(a, b, d) + + if math.Abs(csl) >= math.Abs(snl) || math.Abs(csr) >= math.Abs(snr) { + // Compute the [0, 0] and [0, 1] elements of U^T*A and V^T*B, + // and [0, 1] element of |U|^T*|A| and |V|^T*|B|. + + ua11r := csl * a1 + ua12 := csl*a2 + snl*a3 + + vb11r := csr * b1 + vb12 := csr*b2 + snr*b3 + + aua12 := math.Abs(csl)*math.Abs(a2) + math.Abs(snl)*math.Abs(a3) + avb12 := math.Abs(csr)*math.Abs(b2) + math.Abs(snr)*math.Abs(b3) + + // Zero [0, 1] elements of U^T*A and V^T*B. + if math.Abs(ua11r)+math.Abs(ua12) != 0 { + if aua12/(math.Abs(ua11r)+math.Abs(ua12)) <= avb12/(math.Abs(vb11r)+math.Abs(vb12)) { + csq, snq, _ = impl.Dlartg(-ua11r, ua12) + } else { + csq, snq, _ = impl.Dlartg(-vb11r, vb12) + } + } else { + csq, snq, _ = impl.Dlartg(-vb11r, vb12) + } + + csu = csl + snu = -snl + csv = csr + snv = -snr + } else { + // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, + // and [1, 1] element of |U|^T*|A| and |V|^T*|B|. + + ua21 := -snl * a1 + ua22 := -snl*a2 + csl*a3 + + vb21 := -snr * b1 + vb22 := -snr*b2 + csr*b3 + + aua22 := math.Abs(snl)*math.Abs(a2) + math.Abs(csl)*math.Abs(a3) + avb22 := math.Abs(snr)*math.Abs(b2) + math.Abs(csr)*math.Abs(b3) + + // Zero [1, 1] elements of U^T*A and V^T*B, and then swap. + if math.Abs(ua21)+math.Abs(ua22) != 0 { + if aua22/(math.Abs(ua21)+math.Abs(ua22)) <= avb22/(math.Abs(vb21)+math.Abs(vb22)) { + csq, snq, _ = impl.Dlartg(-ua21, ua22) + } else { + csq, snq, _ = impl.Dlartg(-vb21, vb22) + } + } else { + csq, snq, _ = impl.Dlartg(-vb21, vb22) + } + + csu = snl + snu = csl + csv = snr + snv = csr + } + } else { + // Input matrices A and B are lower triangular matrices + // + // Form matrix C = A*adj(B) = [ a 0 ] + // [ c d ] + a := a1 * b3 + d := a3 * b1 + c := a2*b3 - a3*b2 + + // The SVD of real 2-by-2 triangular C + // + // [ csl -snl ]*[ a 0 ]*[ csr snr ] = [ r 0 ] + // [ snl csl ] [ c d ] [ -snr csr ] [ 0 t ] + _, _, snr, csr, snl, csl := impl.Dlasv2(a, c, d) + + if math.Abs(csr) >= math.Abs(snr) || math.Abs(csl) >= math.Abs(snl) { + // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, + // and [1, 0] element of |U|^T*|A| and |V|^T*|B|. + + ua21 := -snr*a1 + csr*a2 + ua22r := csr * a3 + + vb21 := -snl*b1 + csl*b2 + vb22r := csl * b3 + + aua21 := math.Abs(snr)*math.Abs(a1) + math.Abs(csr)*math.Abs(a2) + avb21 := math.Abs(snl)*math.Abs(b1) + math.Abs(csl)*math.Abs(b2) + + // Zero [1, 0] elements of U^T*A and V^T*B. 
+ if (math.Abs(ua21) + math.Abs(ua22r)) != 0 { + if aua21/(math.Abs(ua21)+math.Abs(ua22r)) <= avb21/(math.Abs(vb21)+math.Abs(vb22r)) { + csq, snq, _ = impl.Dlartg(ua22r, ua21) + } else { + csq, snq, _ = impl.Dlartg(vb22r, vb21) + } + } else { + csq, snq, _ = impl.Dlartg(vb22r, vb21) + } + + csu = csr + snu = -snr + csv = csl + snv = -snl + } else { + // Compute the [0, 0] and [0, 1] elements of U^T *A and V^T *B, + // and [0, 0] element of |U|^T*|A| and |V|^T*|B|. + + ua11 := csr*a1 + snr*a2 + ua12 := snr * a3 + + vb11 := csl*b1 + snl*b2 + vb12 := snl * b3 + + aua11 := math.Abs(csr)*math.Abs(a1) + math.Abs(snr)*math.Abs(a2) + avb11 := math.Abs(csl)*math.Abs(b1) + math.Abs(snl)*math.Abs(b2) + + // Zero [0, 0] elements of U^T*A and V^T*B, and then swap. + if (math.Abs(ua11) + math.Abs(ua12)) != 0 { + if aua11/(math.Abs(ua11)+math.Abs(ua12)) <= avb11/(math.Abs(vb11)+math.Abs(vb12)) { + csq, snq, _ = impl.Dlartg(ua12, ua11) + } else { + csq, snq, _ = impl.Dlartg(vb12, vb11) + } + } else { + csq, snq, _ = impl.Dlartg(vb12, vb11) + } + + csu = snr + snu = csr + csv = snl + snv = csl + } + } + + return csu, snu, csv, snv, csq, snq +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go new file mode 100644 index 00000000..00a869bc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go @@ -0,0 +1,431 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlahqr computes the eigenvalues and Schur factorization of a block of an n×n +// upper Hessenberg matrix H, using the double-shift/single-shift QR algorithm. +// +// h and ldh represent the matrix H. Dlahqr works primarily with the Hessenberg +// submatrix H[ilo:ihi+1,ilo:ihi+1], but applies transformations to all of H if +// wantt is true. It is assumed that H[ihi+1:n,ihi+1:n] is already upper +// quasi-triangular, although this is not checked. +// +// It must hold that +// 0 <= ilo <= max(0,ihi), and ihi < n, +// and that +// H[ilo,ilo-1] == 0, if ilo > 0, +// otherwise Dlahqr will panic. +// +// If unconverged is zero on return, wr[ilo:ihi+1] and wi[ilo:ihi+1] will contain +// respectively the real and imaginary parts of the computed eigenvalues ilo +// to ihi. If two eigenvalues are computed as a complex conjugate pair, they are +// stored in consecutive elements of wr and wi, say the i-th and (i+1)th, with +// wi[i] > 0 and wi[i+1] < 0. If wantt is true, the eigenvalues are stored in +// the same order as on the diagonal of the Schur form returned in H, with +// wr[i] = H[i,i], and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, +// wi[i] = sqrt(abs(H[i+1,i]*H[i,i+1])) and wi[i+1] = -wi[i]. +// +// wr and wi must have length ihi+1. +// +// z and ldz represent an n×n matrix Z. If wantz is true, the transformations +// will be applied to the submatrix Z[iloz:ihiz+1,ilo:ihi+1] and it must hold that +// 0 <= iloz <= ilo, and ihi <= ihiz < n. +// If wantz is false, z is not referenced. +// +// unconverged indicates whether Dlahqr computed all the eigenvalues ilo to ihi +// in a total of 30 iterations per eigenvalue. +// +// If unconverged is zero, all the eigenvalues ilo to ihi have been computed and +// will be stored on return in wr[ilo:ihi+1] and wi[ilo:ihi+1]. 
+// +// If unconverged is zero and wantt is true, H[ilo:ihi+1,ilo:ihi+1] will be +// overwritten on return by upper quasi-triangular full Schur form with any +// 2×2 diagonal blocks in standard form. +// +// If unconverged is zero and if wantt is false, the contents of h on return is +// unspecified. +// +// If unconverged is positive, some eigenvalues have not converged, and +// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] contain those eigenvalues +// which have been successfully computed. +// +// If unconverged is positive and wantt is true, then on return +// (initial H)*U = U*(final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged is positive and wantt is false, on return the remaining +// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix +// H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged is positive and wantz is true, then on return +// (final Z) = (initial Z)*U, +// where U is the orthogonal matrix in (*) regardless of the value of wantt. +// +// Dlahqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlahqr(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int) (unconverged int) { + switch { + case n < 0: + panic(nLT0) + case ilo < 0, max(0, ihi) < ilo: + panic(badIlo) + case ihi >= n: + panic(badIhi) + case ldh < max(1, n): + panic(badLdH) + case wantz && (iloz < 0 || ilo < iloz): + panic(badIloz) + case wantz && (ihiz < ihi || n <= ihiz): + panic(badIhiz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case len(wr) != ihi+1: + panic(shortWr) + case len(wi) != ihi+1: + panic(shortWi) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case ilo > 0 && h[ilo*ldh+ilo-1] != 0: + panic(notIsolated) + } + + if ilo == ihi { + wr[ilo] = h[ilo*ldh+ilo] + wi[ilo] = 0 + return 0 + } + + // Clear out the trash. + for j := ilo; j < ihi-2; j++ { + h[(j+2)*ldh+j] = 0 + h[(j+3)*ldh+j] = 0 + } + if ilo <= ihi-2 { + h[ihi*ldh+ihi-2] = 0 + } + + nh := ihi - ilo + 1 + nz := ihiz - iloz + 1 + + // Set machine-dependent constants for the stopping criterion. + ulp := dlamchP + smlnum := float64(nh) / ulp * dlamchS + + // i1 and i2 are the indices of the first row and last column of H to + // which transformations must be applied. If eigenvalues only are being + // computed, i1 and i2 are set inside the main loop. + var i1, i2 int + if wantt { + i1 = 0 + i2 = n - 1 + } + + itmax := 30 * max(10, nh) // Total number of QR iterations allowed. + + // The main loop begins here. i is the loop index and decreases from ihi + // to ilo in steps of 1 or 2. Each iteration of the loop works with the + // active submatrix in rows and columns l to i. Eigenvalues i+1 to ihi + // have already converged. Either l = ilo or H[l,l-1] is negligible so + // that the matrix splits. + bi := blas64.Implementation() + i := ihi + for i >= ilo { + l := ilo + + // Perform QR iterations on rows and columns ilo to i until a + // submatrix of order 1 or 2 splits off at the bottom because a + // subdiagonal element has become negligible. + converged := false + for its := 0; its <= itmax; its++ { + // Look for a single small subdiagonal element. 
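			// An editorial aside (not from the reference implementation): a
			// negligible subdiagonal entry H[k,k-1] allows the active block to
			// be deflated, i.e. split at row k into independent subproblems.
			// The scan below walks up from row i and accepts a split either
			// when H[k,k-1] is below the underflow-based threshold smlnum or
			// when the Ahues & Tisseur criterion described in the comments
			// holds.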
+ var k int + for k = i; k > l; k-- { + if math.Abs(h[k*ldh+k-1]) <= smlnum { + break + } + tst := math.Abs(h[(k-1)*ldh+k-1]) + math.Abs(h[k*ldh+k]) + if tst == 0 { + if k-2 >= ilo { + tst += math.Abs(h[(k-1)*ldh+k-2]) + } + if k+1 <= ihi { + tst += math.Abs(h[(k+1)*ldh+k]) + } + } + // The following is a conservative small + // subdiagonal deflation criterion due to Ahues + // & Tisseur (LAWN 122, 1997). It has better + // mathematical foundation and improves accuracy + // in some cases. + if math.Abs(h[k*ldh+k-1]) <= ulp*tst { + ab := math.Max(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) + ba := math.Min(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) + aa := math.Max(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) + bb := math.Min(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) + s := aa + ab + if ab/s*ba <= math.Max(smlnum, aa/s*bb*ulp) { + break + } + } + } + l = k + if l > ilo { + // H[l,l-1] is negligible. + h[l*ldh+l-1] = 0 + } + if l >= i-1 { + // Break the loop because a submatrix of order 1 + // or 2 has split off. + converged = true + break + } + + // Now the active submatrix is in rows and columns l to + // i. If eigenvalues only are being computed, only the + // active submatrix need be transformed. + if !wantt { + i1 = l + i2 = i + } + + const ( + dat1 = 3.0 + dat2 = -0.4375 + ) + var h11, h21, h12, h22 float64 + switch its { + case 10: // Exceptional shift. + s := math.Abs(h[(l+1)*ldh+l]) + math.Abs(h[(l+2)*ldh+l+1]) + h11 = dat1*s + h[l*ldh+l] + h12 = dat2 * s + h21 = s + h22 = h11 + case 20: // Exceptional shift. + s := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) + h11 = dat1*s + h[i*ldh+i] + h12 = dat2 * s + h21 = s + h22 = h11 + default: // Prepare to use Francis' double shift (i.e., + // 2nd degree generalized Rayleigh quotient). + h11 = h[(i-1)*ldh+i-1] + h21 = h[i*ldh+i-1] + h12 = h[(i-1)*ldh+i] + h22 = h[i*ldh+i] + } + s := math.Abs(h11) + math.Abs(h12) + math.Abs(h21) + math.Abs(h22) + var ( + rt1r, rt1i float64 + rt2r, rt2i float64 + ) + if s != 0 { + h11 /= s + h21 /= s + h12 /= s + h22 /= s + tr := (h11 + h22) / 2 + det := (h11-tr)*(h22-tr) - h12*h21 + rtdisc := math.Sqrt(math.Abs(det)) + if det >= 0 { + // Complex conjugate shifts. + rt1r = tr * s + rt2r = rt1r + rt1i = rtdisc * s + rt2i = -rt1i + } else { + // Real shifts (use only one of them). + rt1r = tr + rtdisc + rt2r = tr - rtdisc + if math.Abs(rt1r-h22) <= math.Abs(rt2r-h22) { + rt1r *= s + rt2r = rt1r + } else { + rt2r *= s + rt1r = rt2r + } + rt1i = 0 + rt2i = 0 + } + } + + // Look for two consecutive small subdiagonal elements. + var m int + var v [3]float64 + for m = i - 2; m >= l; m-- { + // Determine the effect of starting the + // double-shift QR iteration at row m, and see + // if this would make H[m,m-1] negligible. The + // following uses scaling to avoid overflows and + // most underflows. + h21s := h[(m+1)*ldh+m] + s := math.Abs(h[m*ldh+m]-rt2r) + math.Abs(rt2i) + math.Abs(h21s) + h21s /= s + v[0] = h21s*h[m*ldh+m+1] + (h[m*ldh+m]-rt1r)*((h[m*ldh+m]-rt2r)/s) - rt2i/s*rt1i + v[1] = h21s * (h[m*ldh+m] + h[(m+1)*ldh+m+1] - rt1r - rt2r) + v[2] = h21s * h[(m+2)*ldh+m+1] + s = math.Abs(v[0]) + math.Abs(v[1]) + math.Abs(v[2]) + v[0] /= s + v[1] /= s + v[2] /= s + if m == l { + break + } + dsum := math.Abs(h[(m-1)*ldh+m-1]) + math.Abs(h[m*ldh+m]) + math.Abs(h[(m+1)*ldh+m+1]) + if math.Abs(h[m*ldh+m-1])*(math.Abs(v[1])+math.Abs(v[2])) <= ulp*math.Abs(v[0])*dsum { + break + } + } + + // Double-shift QR step. 
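			// An editorial aside (not from the reference implementation): the
			// shift pair (rt1, rt2) computed above consists of the eigenvalues
			// of the 2×2 matrix
			//  [h11 h12]
			//  [h21 h22]
			// assembled before it (the trailing 2×2 block of the active
			// submatrix in the default case), i.e. the roots of
			//  λ² - (h11+h22)λ + (h11*h22 - h12*h21) = 0.
			// Complex conjugate roots are used as a pair, while for real roots
			// the one closer to h22 is used for both shifts. The loop below
			// creates a bulge from the vector v and chases it down the
			// subdiagonal, restoring Hessenberg form column by column.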
+ for k := m; k < i; k++ { + // The first iteration of this loop determines a + // reflection G from the vector V and applies it + // from left and right to H, thus creating a + // non-zero bulge below the subdiagonal. + // + // Each subsequent iteration determines a + // reflection G to restore the Hessenberg form + // in the (k-1)th column, and thus chases the + // bulge one step toward the bottom of the + // active submatrix. nr is the order of G. + + nr := min(3, i-k+1) + if k > m { + bi.Dcopy(nr, h[k*ldh+k-1:], ldh, v[:], 1) + } + var t0 float64 + v[0], t0 = impl.Dlarfg(nr, v[0], v[1:], 1) + if k > m { + h[k*ldh+k-1] = v[0] + h[(k+1)*ldh+k-1] = 0 + if k < i-1 { + h[(k+2)*ldh+k-1] = 0 + } + } else if m > l { + // Use the following instead of H[k,k-1] = -H[k,k-1] + // to avoid a bug when v[1] and v[2] underflow. + h[k*ldh+k-1] *= 1 - t0 + } + t1 := t0 * v[1] + if nr == 3 { + t2 := t0 * v[2] + + // Apply G from the left to transform + // the rows of the matrix in columns k + // to i2. + for j := k; j <= i2; j++ { + sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] + v[2]*h[(k+2)*ldh+j] + h[k*ldh+j] -= sum * t0 + h[(k+1)*ldh+j] -= sum * t1 + h[(k+2)*ldh+j] -= sum * t2 + } + + // Apply G from the right to transform + // the columns of the matrix in rows i1 + // to min(k+3,i). + for j := i1; j <= min(k+3, i); j++ { + sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] + v[2]*h[j*ldh+k+2] + h[j*ldh+k] -= sum * t0 + h[j*ldh+k+1] -= sum * t1 + h[j*ldh+k+2] -= sum * t2 + } + + if wantz { + // Accumulate transformations in the matrix Z. + for j := iloz; j <= ihiz; j++ { + sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] + v[2]*z[j*ldz+k+2] + z[j*ldz+k] -= sum * t0 + z[j*ldz+k+1] -= sum * t1 + z[j*ldz+k+2] -= sum * t2 + } + } + } else if nr == 2 { + // Apply G from the left to transform + // the rows of the matrix in columns k + // to i2. + for j := k; j <= i2; j++ { + sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] + h[k*ldh+j] -= sum * t0 + h[(k+1)*ldh+j] -= sum * t1 + } + + // Apply G from the right to transform + // the columns of the matrix in rows i1 + // to min(k+3,i). + for j := i1; j <= i; j++ { + sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] + h[j*ldh+k] -= sum * t0 + h[j*ldh+k+1] -= sum * t1 + } + + if wantz { + // Accumulate transformations in the matrix Z. + for j := iloz; j <= ihiz; j++ { + sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] + z[j*ldz+k] -= sum * t0 + z[j*ldz+k+1] -= sum * t1 + } + } + } + } + } + + if !converged { + // The QR iteration finished without splitting off a + // submatrix of order 1 or 2. + return i + 1 + } + + if l == i { + // H[i,i-1] is negligible: one eigenvalue has converged. + wr[i] = h[i*ldh+i] + wi[i] = 0 + } else if l == i-1 { + // H[i-1,i-2] is negligible: a pair of eigenvalues have converged. + + // Transform the 2×2 submatrix to standard Schur form, + // and compute and store the eigenvalues. + var cs, sn float64 + a, b := h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] + c, d := h[i*ldh+i-1], h[i*ldh+i] + a, b, c, d, wr[i-1], wi[i-1], wr[i], wi[i], cs, sn = impl.Dlanv2(a, b, c, d) + h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] = a, b + h[i*ldh+i-1], h[i*ldh+i] = c, d + + if wantt { + // Apply the transformation to the rest of H. + if i2 > i { + bi.Drot(i2-i, h[(i-1)*ldh+i+1:], 1, h[i*ldh+i+1:], 1, cs, sn) + } + bi.Drot(i-i1-1, h[i1*ldh+i-1:], ldh, h[i1*ldh+i:], ldh, cs, sn) + } + + if wantz { + // Apply the transformation to Z. + bi.Drot(nz, z[iloz*ldz+i-1:], ldz, z[iloz*ldz+i:], ldz, cs, sn) + } + } + + // Return to start of the main loop with new value of i. 
+ i = l - 1 + } + return 0 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go new file mode 100644 index 00000000..a47dc8fe --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go @@ -0,0 +1,195 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlahr2 reduces the first nb columns of a real general n×(n-k+1) matrix A so +// that elements below the k-th subdiagonal are zero. The reduction is performed +// by an orthogonal similarity transformation Q^T * A * Q. Dlahr2 returns the +// matrices V and T which determine Q as a block reflector I - V*T*V^T, and +// also the matrix Y = A * V * T. +// +// The matrix Q is represented as a product of nb elementary reflectors +// Q = H_0 * H_1 * ... * H_{nb-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T, +// where v is a real vector with v[0:i+k-1] = 0 and v[i+k-1] = 1. v[i+k:n] is +// stored on exit in A[i+k+1:n,i]. +// +// The elements of the vectors v together form the (n-k+1)×nb matrix +// V which is needed, with T and Y, to apply the transformation to the +// unreduced part of the matrix, using an update of the form +// A = (I - V*T*V^T) * (A - Y*V^T). +// +// On entry, a contains the n×(n-k+1) general matrix A. On return, the elements +// on and above the k-th subdiagonal in the first nb columns are overwritten +// with the corresponding elements of the reduced matrix; the elements below the +// k-th subdiagonal, with the slice tau, represent the matrix Q as a product of +// elementary reflectors. The other columns of A are unchanged. +// +// The contents of A on exit are illustrated by the following example +// with n = 7, k = 3 and nb = 2: +// [ a a a a a ] +// [ a a a a a ] +// [ a a a a a ] +// [ h h a a a ] +// [ v0 h a a a ] +// [ v0 v1 a a a ] +// [ v0 v1 a a a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// k is the offset for the reduction. Elements below the k-th subdiagonal in the +// first nb columns are reduced to zero. +// +// nb is the number of columns to be reduced. +// +// On entry, a represents the n×(n-k+1) matrix A. On return, the elements on and +// above the k-th subdiagonal in the first nb columns are overwritten with the +// corresponding elements of the reduced matrix. The elements below the k-th +// subdiagonal, with the slice tau, represent the matrix Q as a product of +// elementary reflectors. The other columns of A are unchanged. +// +// tau will contain the scalar factors of the elementary reflectors. It must +// have length at least nb. +// +// t and ldt represent the nb×nb upper triangular matrix T, and y and ldy +// represent the n×nb matrix Y. +// +// Dlahr2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlahr2(n, k, nb int, a []float64, lda int, tau, t []float64, ldt int, y []float64, ldy int) { + switch { + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case lda < max(1, n-k+1): + panic(badLdA) + case ldt < max(1, nb): + panic(badLdT) + case ldy < max(1, nb): + panic(badLdY) + } + + // Quick return if possible. 
+ if n < 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n-k+1: + panic(shortA) + case len(tau) < nb: + panic(shortTau) + case len(t) < (nb-1)*ldt+nb: + panic(shortT) + case len(y) < (n-1)*ldy+nb: + panic(shortY) + } + + // Quick return if possible. + if n == 1 { + return + } + + bi := blas64.Implementation() + var ei float64 + for i := 0; i < nb; i++ { + if i > 0 { + // Update A[k:n,i]. + + // Update i-th column of A - Y * V^T. + bi.Dgemv(blas.NoTrans, n-k, i, + -1, y[k*ldy:], ldy, + a[(k+i-1)*lda:], 1, + 1, a[k*lda+i:], lda) + + // Apply I - V * T^T * V^T to this column (call it b) + // from the left, using the last column of T as + // workspace. + // Let V = [ V1 ] and b = [ b1 ] (first i rows) + // [ V2 ] [ b2 ] + // where V1 is unit lower triangular. + // + // w := V1^T * b1. + bi.Dcopy(i, a[k*lda+i:], lda, t[nb-1:], ldt) + bi.Dtrmv(blas.Lower, blas.Trans, blas.Unit, i, + a[k*lda:], lda, t[nb-1:], ldt) + + // w := w + V2^T * b2. + bi.Dgemv(blas.Trans, n-k-i, i, + 1, a[(k+i)*lda:], lda, + a[(k+i)*lda+i:], lda, + 1, t[nb-1:], ldt) + + // w := T^T * w. + bi.Dtrmv(blas.Upper, blas.Trans, blas.NonUnit, i, + t, ldt, t[nb-1:], ldt) + + // b2 := b2 - V2*w. + bi.Dgemv(blas.NoTrans, n-k-i, i, + -1, a[(k+i)*lda:], lda, + t[nb-1:], ldt, + 1, a[(k+i)*lda+i:], lda) + + // b1 := b1 - V1*w. + bi.Dtrmv(blas.Lower, blas.NoTrans, blas.Unit, i, + a[k*lda:], lda, t[nb-1:], ldt) + bi.Daxpy(i, -1, t[nb-1:], ldt, a[k*lda+i:], lda) + + a[(k+i-1)*lda+i-1] = ei + } + + // Generate the elementary reflector H_i to annihilate + // A[k+i+1:n,i]. + ei, tau[i] = impl.Dlarfg(n-k-i, a[(k+i)*lda+i], a[min(k+i+1, n-1)*lda+i:], lda) + a[(k+i)*lda+i] = 1 + + // Compute Y[k:n,i]. + bi.Dgemv(blas.NoTrans, n-k, n-k-i, + 1, a[k*lda+i+1:], lda, + a[(k+i)*lda+i:], lda, + 0, y[k*ldy+i:], ldy) + bi.Dgemv(blas.Trans, n-k-i, i, + 1, a[(k+i)*lda:], lda, + a[(k+i)*lda+i:], lda, + 0, t[i:], ldt) + bi.Dgemv(blas.NoTrans, n-k, i, + -1, y[k*ldy:], ldy, + t[i:], ldt, + 1, y[k*ldy+i:], ldy) + bi.Dscal(n-k, tau[i], y[k*ldy+i:], ldy) + + // Compute T[0:i,i]. + bi.Dscal(i, -tau[i], t[i:], ldt) + bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, + t, ldt, t[i:], ldt) + + t[i*ldt+i] = tau[i] + } + a[(k+nb-1)*lda+nb-1] = ei + + // Compute Y[0:k,0:nb]. + impl.Dlacpy(blas.All, k, nb, a[1:], lda, y, ldy) + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, k, nb, + 1, a[k*lda:], lda, y, ldy) + if n > k+nb { + bi.Dgemm(blas.NoTrans, blas.NoTrans, k, nb, n-k-nb, + 1, a[1+nb:], lda, + a[(k+nb)*lda:], lda, + 1, y, ldy) + } + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, k, nb, + 1, t, ldt, y, ldy) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go new file mode 100644 index 00000000..ca0b2f78 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go @@ -0,0 +1,405 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaln2 solves a linear equation or a system of 2 linear equations of the form +// (ca A - w D) X = scale B, if trans == false, +// (ca A^T - w D) X = scale B, if trans == true, +// where A is a na×na real matrix, ca is a real scalar, D is a na×na diagonal +// real matrix, w is a scalar, real if nw == 1, complex if nw == 2, and X and B +// are na×1 matrices, real if w is real, complex if w is complex. 
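//
// For instance (an editorial sketch), with na == 1 and nw == 1 the call reduces
// to the scalar equation
//  (ca*A[0,0] - wr*d1) * x[0] = scale * b[0],
// with scale possibly chosen smaller than 1 and the coefficient possibly
// perturbed, as described below.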
+// +// If w is complex, X and B are represented as na×2 matrices, the first column +// of each being the real part and the second being the imaginary part. +// +// na and nw must be 1 or 2, otherwise Dlaln2 will panic. +// +// d1 and d2 are the diagonal elements of D. d2 is not used if na == 1. +// +// wr and wi represent the real and imaginary part, respectively, of the scalar +// w. wi is not used if nw == 1. +// +// smin is the desired lower bound on the singular values of A. This should be +// a safe distance away from underflow or overflow, say, between +// (underflow/machine precision) and (overflow*machine precision). +// +// If both singular values of (ca A - w D) are less than smin, smin*identity +// will be used instead of (ca A - w D). If only one singular value is less than +// smin, one element of (ca A - w D) will be perturbed enough to make the +// smallest singular value roughly smin. If both singular values are at least +// smin, (ca A - w D) will not be perturbed. In any case, the perturbation will +// be at most some small multiple of max(smin, ulp*norm(ca A - w D)). The +// singular values are computed by infinity-norm approximations, and thus will +// only be correct to a factor of 2 or so. +// +// All input quantities are assumed to be smaller than overflow by a reasonable +// factor. +// +// scale is a scaling factor less than or equal to 1 which is chosen so that X +// can be computed without overflow. X is further scaled if necessary to assure +// that norm(ca A - w D)*norm(X) is less than overflow. +// +// xnorm contains the infinity-norm of X when X is regarded as a na×nw real +// matrix. +// +// ok will be false if (ca A - w D) had to be perturbed to make its smallest +// singular value greater than smin, otherwise ok will be true. +// +// Dlaln2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaln2(trans bool, na, nw int, smin, ca float64, a []float64, lda int, d1, d2 float64, b []float64, ldb int, wr, wi float64, x []float64, ldx int) (scale, xnorm float64, ok bool) { + // TODO(vladimir-ch): Consider splitting this function into two, one + // handling the real case (nw == 1) and the other handling the complex + // case (nw == 2). Given that Go has complex types, their signatures + // would be simpler and more natural, and the implementation not as + // convoluted. + + switch { + case na != 1 && na != 2: + panic(badNa) + case nw != 1 && nw != 2: + panic(badNw) + case lda < na: + panic(badLdA) + case len(a) < (na-1)*lda+na: + panic(shortA) + case ldb < nw: + panic(badLdB) + case len(b) < (na-1)*ldb+nw: + panic(shortB) + case ldx < nw: + panic(badLdX) + case len(x) < (na-1)*ldx+nw: + panic(shortX) + } + + smlnum := 2 * dlamchS + bignum := 1 / smlnum + smini := math.Max(smin, smlnum) + + ok = true + scale = 1 + + if na == 1 { + // 1×1 (i.e., scalar) system C X = B. + + if nw == 1 { + // Real 1×1 system. + + // C = ca A - w D. + csr := ca*a[0] - wr*d1 + cnorm := math.Abs(csr) + + // If |C| < smini, use C = smini. + if cnorm < smini { + csr = smini + cnorm = smini + ok = false + } + + // Check scaling for X = B / C. + bnorm := math.Abs(b[0]) + if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { + scale = 1 / bnorm + } + + // Compute X. + x[0] = b[0] * scale / csr + xnorm = math.Abs(x[0]) + + return scale, xnorm, ok + } + + // Complex 1×1 system (w is complex). + + // C = ca A - w D. + csr := ca*a[0] - wr*d1 + csi := -wi * d1 + cnorm := math.Abs(csr) + math.Abs(csi) + + // If |C| < smini, use C = smini. 
+ if cnorm < smini { + csr = smini + csi = 0 + cnorm = smini + ok = false + } + + // Check scaling for X = B / C. + bnorm := math.Abs(b[0]) + math.Abs(b[1]) + if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { + scale = 1 / bnorm + } + + // Compute X. + cx := complex(scale*b[0], scale*b[1]) / complex(csr, csi) + x[0], x[1] = real(cx), imag(cx) + xnorm = math.Abs(x[0]) + math.Abs(x[1]) + + return scale, xnorm, ok + } + + // 2×2 system. + + // Compute the real part of + // C = ca A - w D + // or + // C = ca A^T - w D. + crv := [4]float64{ + ca*a[0] - wr*d1, + ca * a[1], + ca * a[lda], + ca*a[lda+1] - wr*d2, + } + if trans { + crv[1] = ca * a[lda] + crv[2] = ca * a[1] + } + + pivot := [4][4]int{ + {0, 1, 2, 3}, + {1, 0, 3, 2}, + {2, 3, 0, 1}, + {3, 2, 1, 0}, + } + + if nw == 1 { + // Real 2×2 system (w is real). + + // Find the largest element in C. + var cmax float64 + var icmax int + for j, v := range crv { + v = math.Abs(v) + if v > cmax { + cmax = v + icmax = j + } + } + + // If norm(C) < smini, use smini*identity. + if cmax < smini { + bnorm := math.Max(math.Abs(b[0]), math.Abs(b[ldb])) + if smini < 1 && bnorm > math.Max(1, bignum*smini) { + scale = 1 / bnorm + } + temp := scale / smini + x[0] = temp * b[0] + x[ldx] = temp * b[ldb] + xnorm = temp * bnorm + ok = false + + return scale, xnorm, ok + } + + // Gaussian elimination with complete pivoting. + // Form upper triangular matrix + // [ur11 ur12] + // [ 0 ur22] + ur11 := crv[icmax] + ur12 := crv[pivot[icmax][1]] + cr21 := crv[pivot[icmax][2]] + cr22 := crv[pivot[icmax][3]] + ur11r := 1 / ur11 + lr21 := ur11r * cr21 + ur22 := cr22 - ur12*lr21 + + // If smaller pivot < smini, use smini. + if math.Abs(ur22) < smini { + ur22 = smini + ok = false + } + + var br1, br2 float64 + if icmax > 1 { + // If the pivot lies in the second row, swap the rows. + br1 = b[ldb] + br2 = b[0] + } else { + br1 = b[0] + br2 = b[ldb] + } + br2 -= lr21 * br1 // Apply the Gaussian elimination step to the right-hand side. + + bbnd := math.Max(math.Abs(ur22*ur11r*br1), math.Abs(br2)) + if bbnd > 1 && math.Abs(ur22) < 1 && bbnd >= bignum*math.Abs(ur22) { + scale = 1 / bbnd + } + + // Solve the linear system ur*xr=br. + xr2 := br2 * scale / ur22 + xr1 := scale*br1*ur11r - ur11r*ur12*xr2 + if icmax&0x1 != 0 { + // If the pivot lies in the second column, swap the components of the solution. + x[0] = xr2 + x[ldx] = xr1 + } else { + x[0] = xr1 + x[ldx] = xr2 + } + xnorm = math.Max(math.Abs(xr1), math.Abs(xr2)) + + // Further scaling if norm(A)*norm(X) > overflow. + if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { + temp := cmax / bignum + x[0] *= temp + x[ldx] *= temp + xnorm *= temp + scale *= temp + } + + return scale, xnorm, ok + } + + // Complex 2×2 system (w is complex). + + // Find the largest element in C. + civ := [4]float64{ + -wi * d1, + 0, + 0, + -wi * d2, + } + var cmax float64 + var icmax int + for j, v := range crv { + v := math.Abs(v) + if v+math.Abs(civ[j]) > cmax { + cmax = v + math.Abs(civ[j]) + icmax = j + } + } + + // If norm(C) < smini, use smini*identity. + if cmax < smini { + br1 := math.Abs(b[0]) + math.Abs(b[1]) + br2 := math.Abs(b[ldb]) + math.Abs(b[ldb+1]) + bnorm := math.Max(br1, br2) + if smini < 1 && bnorm > 1 && bnorm > bignum*smini { + scale = 1 / bnorm + } + temp := scale / smini + x[0] = temp * b[0] + x[1] = temp * b[1] + x[ldb] = temp * b[ldb] + x[ldb+1] = temp * b[ldb+1] + xnorm = temp * bnorm + ok = false + + return scale, xnorm, ok + } + + // Gaussian elimination with complete pivoting. 
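	// (An editorial aside: as in the real 2×2 branch above, the entry of C
	// with the largest absolute value, measured as |real| + |imag|, is pivoted
	// into the (0,0) position and one elimination step produces an upper
	// triangular 2×2 factor, here carried in separate real and imaginary
	// parts.)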
+ ur11 := crv[icmax] + ui11 := civ[icmax] + ur12 := crv[pivot[icmax][1]] + ui12 := civ[pivot[icmax][1]] + cr21 := crv[pivot[icmax][2]] + ci21 := civ[pivot[icmax][2]] + cr22 := crv[pivot[icmax][3]] + ci22 := civ[pivot[icmax][3]] + var ( + ur11r, ui11r float64 + lr21, li21 float64 + ur12s, ui12s float64 + ur22, ui22 float64 + ) + if icmax == 0 || icmax == 3 { + // Off-diagonals of pivoted C are real. + if math.Abs(ur11) > math.Abs(ui11) { + temp := ui11 / ur11 + ur11r = 1 / (ur11 * (1 + temp*temp)) + ui11r = -temp * ur11r + } else { + temp := ur11 / ui11 + ui11r = -1 / (ui11 * (1 + temp*temp)) + ur11r = -temp * ui11r + } + lr21 = cr21 * ur11r + li21 = cr21 * ui11r + ur12s = ur12 * ur11r + ui12s = ur12 * ui11r + ur22 = cr22 - ur12*lr21 + ui22 = ci22 - ur12*li21 + } else { + // Diagonals of pivoted C are real. + ur11r = 1 / ur11 + // ui11r is already 0. + lr21 = cr21 * ur11r + li21 = ci21 * ur11r + ur12s = ur12 * ur11r + ui12s = ui12 * ur11r + ur22 = cr22 - ur12*lr21 + ui12*li21 + ui22 = -ur12*li21 - ui12*lr21 + } + u22abs := math.Abs(ur22) + math.Abs(ui22) + + // If smaller pivot < smini, use smini. + if u22abs < smini { + ur22 = smini + ui22 = 0 + ok = false + } + + var br1, bi1 float64 + var br2, bi2 float64 + if icmax > 1 { + // If the pivot lies in the second row, swap the rows. + br1 = b[ldb] + bi1 = b[ldb+1] + br2 = b[0] + bi2 = b[1] + } else { + br1 = b[0] + bi1 = b[1] + br2 = b[ldb] + bi2 = b[ldb+1] + } + br2 += -lr21*br1 + li21*bi1 + bi2 += -li21*br1 - lr21*bi1 + + bbnd1 := u22abs * (math.Abs(ur11r) + math.Abs(ui11r)) * (math.Abs(br1) + math.Abs(bi1)) + bbnd2 := math.Abs(br2) + math.Abs(bi2) + bbnd := math.Max(bbnd1, bbnd2) + if bbnd > 1 && u22abs < 1 && bbnd >= bignum*u22abs { + scale = 1 / bbnd + br1 *= scale + bi1 *= scale + br2 *= scale + bi2 *= scale + } + + cx2 := complex(br2, bi2) / complex(ur22, ui22) + xr2, xi2 := real(cx2), imag(cx2) + xr1 := ur11r*br1 - ui11r*bi1 - ur12s*xr2 + ui12s*xi2 + xi1 := ui11r*br1 + ur11r*bi1 - ui12s*xr2 - ur12s*xi2 + if icmax&0x1 != 0 { + // If the pivot lies in the second column, swap the components of the solution. + x[0] = xr2 + x[1] = xi2 + x[ldx] = xr1 + x[ldx+1] = xi1 + } else { + x[0] = xr1 + x[1] = xi1 + x[ldx] = xr2 + x[ldx+1] = xi2 + } + xnorm = math.Max(math.Abs(xr1)+math.Abs(xi1), math.Abs(xr2)+math.Abs(xi2)) + + // Further scaling if norm(A)*norm(X) > overflow. + if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { + temp := cmax / bignum + x[0] *= temp + x[1] *= temp + x[ldx] *= temp + x[ldx+1] *= temp + xnorm *= temp + scale *= temp + } + + return scale, xnorm, ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go new file mode 100644 index 00000000..9edfa83c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go @@ -0,0 +1,89 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlange computes the matrix norm of the general m×n matrix a. The input norm +// specifies the norm computed. +// lapack.MaxAbs: the maximum absolute value of an element. +// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. +// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. +// lapack.Frobenius: the square root of the sum of the squares of the entries. 
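//
// For example (an editorial sketch), for the row-major 2×2 matrix
//  a := []float64{
//  	1, -2,
//  	3, 4,
//  }
// the call
//  impl.Dlange(lapack.MaxRowSum, 2, 2, a, 2, nil)
// returns 7, the larger of the two absolute row sums 3 and 7.
//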
+// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. +// There are no restrictions on work for the other matrix norms. +func (impl Implementation) Dlange(norm lapack.MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 { + // TODO(btracey): These should probably be refactored to use BLAS calls. + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return 0 + } + + switch { + case len(a) < (m-1)*lda+n: + panic(badLdA) + case norm == lapack.MaxColumnSum && len(work) < n: + panic(shortWork) + } + + if norm == lapack.MaxAbs { + var value float64 + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + value = math.Max(value, math.Abs(a[i*lda+j])) + } + } + return value + } + if norm == lapack.MaxColumnSum { + if len(work) < n { + panic(shortWork) + } + for i := 0; i < n; i++ { + work[i] = 0 + } + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + var value float64 + for i := 0; i < n; i++ { + value = math.Max(value, work[i]) + } + return value + } + if norm == lapack.MaxRowSum { + var value float64 + for i := 0; i < m; i++ { + var sum float64 + for j := 0; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + value = math.Max(value, sum) + } + return value + } + // norm == lapack.Frobenius + var value float64 + scale := 0.0 + sum := 1.0 + for i := 0; i < m; i++ { + scale, sum = impl.Dlassq(n, a[i*lda:], 1, scale, sum) + } + value = scale * math.Sqrt(sum) + return value +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go new file mode 100644 index 00000000..9ca1897e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go @@ -0,0 +1,75 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlanst computes the specified norm of a symmetric tridiagonal matrix A. +// The diagonal elements of A are stored in d and the off-diagonal elements +// are stored in e. 
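// Illustrative sketch (not part of the vendored gonum source): a possible
// call of Dlange, defined above, on a small row-major matrix. The expected
// values in the comments are worked out by hand from the norm definitions
// in its documentation.
//
//	impl := Implementation{}
//	// 2×3 matrix, lda = 3:
//	//   [  1 -2  3 ]
//	//   [ -4  5 -6 ]
//	a := []float64{1, -2, 3, -4, 5, -6}
//	work := make([]float64, 3) // referenced only for MaxColumnSum
//	_ = impl.Dlange(lapack.MaxAbs, 2, 3, a, 3, nil)        // 6
//	_ = impl.Dlange(lapack.MaxColumnSum, 2, 3, a, 3, work) // 9 = |3|+|-6|
//	_ = impl.Dlange(lapack.MaxRowSum, 2, 3, a, 3, nil)     // 15 = |-4|+|5|+|-6|
//	_ = impl.Dlange(lapack.Frobenius, 2, 3, a, 3, nil)     // sqrt(91)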
+func (impl Implementation) Dlanst(norm lapack.MatrixNorm, n int, d, e []float64) float64 { + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case n < 0: + panic(nLT0) + } + if n == 0 { + return 0 + } + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + } + + switch norm { + default: + panic(badNorm) + case lapack.MaxAbs: + anorm := math.Abs(d[n-1]) + for i := 0; i < n-1; i++ { + sum := math.Abs(d[i]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + sum = math.Abs(e[i]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + } + return anorm + case lapack.MaxColumnSum, lapack.MaxRowSum: + if n == 1 { + return math.Abs(d[0]) + } + anorm := math.Abs(d[0]) + math.Abs(e[0]) + sum := math.Abs(e[n-2]) + math.Abs(d[n-1]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + for i := 1; i < n-1; i++ { + sum := math.Abs(d[i]) + math.Abs(e[i]) + math.Abs(e[i-1]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + } + return anorm + case lapack.Frobenius: + var scale float64 + sum := 1.0 + if n > 1 { + scale, sum = impl.Dlassq(n-1, e, 1, scale, sum) + sum = 2 * sum + } + scale, sum = impl.Dlassq(n, d, 1, scale, sum) + return scale * math.Sqrt(sum) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go new file mode 100644 index 00000000..97ba5b24 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go @@ -0,0 +1,132 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dlansy computes the specified norm of an n×n symmetric matrix. If +// norm == lapack.MaxColumnSum or norm == lapackMaxRowSum work must have length +// at least n, otherwise work is unused. +func (impl Implementation) Dlansy(norm lapack.MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64 { + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case (norm == lapack.MaxColumnSum || norm == lapack.MaxRowSum) && len(work) < n: + panic(shortWork) + } + + switch norm { + default: + panic(badNorm) + case lapack.MaxAbs: + if uplo == blas.Upper { + var max float64 + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + v := math.Abs(a[i*lda+j]) + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + } + return max + } + var max float64 + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + v := math.Abs(a[i*lda+j]) + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + } + return max + case lapack.MaxRowSum, lapack.MaxColumnSum: + // A symmetric matrix has the same 1-norm and ∞-norm. 
+ for i := 0; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < n; i++ { + work[i] += math.Abs(a[i*lda+i]) + for j := i + 1; j < n; j++ { + v := math.Abs(a[i*lda+j]) + work[i] += v + work[j] += v + } + } + } else { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + v := math.Abs(a[i*lda+j]) + work[i] += v + work[j] += v + } + work[i] += math.Abs(a[i*lda+i]) + } + } + var max float64 + for i := 0; i < n; i++ { + v := work[i] + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + return max + case lapack.Frobenius: + if uplo == blas.Upper { + var sum float64 + for i := 0; i < n; i++ { + v := a[i*lda+i] + sum += v * v + for j := i + 1; j < n; j++ { + v := a[i*lda+j] + sum += 2 * v * v + } + } + return math.Sqrt(sum) + } + var sum float64 + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + v := a[i*lda+j] + sum += 2 * v * v + } + v := a[i*lda+i] + sum += v * v + } + return math.Sqrt(sum) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go new file mode 100644 index 00000000..cc96391d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go @@ -0,0 +1,260 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dlantr computes the specified norm of an m×n trapezoidal matrix A. If +// norm == lapack.MaxColumnSum work must have length at least n, otherwise work +// is unused. +func (impl Implementation) Dlantr(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64 { + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
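// Illustrative sketch (not part of the vendored gonum source): a possible
// call of Dlansy, defined above, on the symmetric matrix [1 2; 2 3] with
// its upper triangle stored. The expected values are worked out by hand
// from the norm definitions.
//
//	impl := Implementation{}
//	a := []float64{
//		1, 2,
//		0, 3, // the strictly lower triangle is not referenced
//	}
//	work := make([]float64, 2) // needed for MaxRowSum and MaxColumnSum
//	_ = impl.Dlansy(lapack.MaxAbs, blas.Upper, 2, a, 2, nil)     // 3
//	_ = impl.Dlansy(lapack.MaxRowSum, blas.Upper, 2, a, 2, work) // 5 = |2|+|3|
//	_ = impl.Dlansy(lapack.Frobenius, blas.Upper, 2, a, 2, nil)  // sqrt(18)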
+ minmn := min(m, n) + if minmn == 0 { + return 0 + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case norm == lapack.MaxColumnSum && len(work) < n: + panic(shortWork) + } + + switch norm { + default: + panic(badNorm) + case lapack.MaxAbs: + if diag == blas.Unit { + value := 1.0 + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + for i := 1; i < m; i++ { + for j := 0; j < min(i, n); j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + var value float64 + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + case lapack.MaxColumnSum: + if diag == blas.Unit { + for i := 0; i < minmn; i++ { + work[i] = 1 + } + for i := minmn; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } else { + for i := 1; i < m; i++ { + for j := 0; j < min(i, n); j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } + } else { + for i := 0; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } + } + var max float64 + for _, v := range work[:n] { + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + return max + case lapack.MaxRowSum: + var maxsum float64 + if diag == blas.Unit { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + var sum float64 + if i < minmn { + sum = 1 + } + for j := i + 1; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return math.NaN() + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } else { + for i := 1; i < m; i++ { + var sum float64 + if i < minmn { + sum = 1 + } + for j := 0; j < min(i, n); j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return math.NaN() + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } + } else { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + var sum float64 + for j := i; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return sum + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } else { + for i := 0; i < m; i++ { + var sum float64 + for j := 0; j <= min(i, n-1); j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return sum + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } + } + case lapack.Frobenius: + var nrm float64 + if diag == blas.Unit { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + tmp := a[i*lda+j] + nrm += tmp * tmp + } + } + } else { + for i := 1; i < m; i++ { + for j := 0; j < min(i, n); j++ { + tmp := a[i*lda+j] + nrm += tmp * tmp + } + } + } + nrm += float64(minmn) + } else { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + nrm += tmp * tmp + 
} + } + } else { + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + tmp := math.Abs(a[i*lda+j]) + nrm += tmp * tmp + } + } + } + } + return math.Sqrt(nrm) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go new file mode 100644 index 00000000..e5dcfb75 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go @@ -0,0 +1,132 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlanv2 computes the Schur factorization of a real 2×2 matrix: +// [ a b ] = [ cs -sn ] * [ aa bb ] * [ cs sn ] +// [ c d ] [ sn cs ] [ cc dd ] * [-sn cs ] +// If cc is zero, aa and dd are real eigenvalues of the matrix. Otherwise it +// holds that aa = dd and bb*cc < 0, and aa ± sqrt(bb*cc) are complex conjugate +// eigenvalues. The real and imaginary parts of the eigenvalues are returned in +// (rt1r,rt1i) and (rt2r,rt2i). +func (impl Implementation) Dlanv2(a, b, c, d float64) (aa, bb, cc, dd float64, rt1r, rt1i, rt2r, rt2i float64, cs, sn float64) { + switch { + case c == 0: // Matrix is already upper triangular. + aa = a + bb = b + cc = 0 + dd = d + cs = 1 + sn = 0 + case b == 0: // Matrix is lower triangular, swap rows and columns. + aa = d + bb = -c + cc = 0 + dd = a + cs = 0 + sn = 1 + case a == d && math.Signbit(b) != math.Signbit(c): // Matrix is already in the standard Schur form. + aa = a + bb = b + cc = c + dd = d + cs = 1 + sn = 0 + default: + temp := a - d + p := temp / 2 + bcmax := math.Max(math.Abs(b), math.Abs(c)) + bcmis := math.Min(math.Abs(b), math.Abs(c)) + if b*c < 0 { + bcmis *= -1 + } + scale := math.Max(math.Abs(p), bcmax) + z := p/scale*p + bcmax/scale*bcmis + eps := dlamchP + + if z >= 4*eps { + // Real eigenvalues. Compute aa and dd. + if p > 0 { + z = p + math.Sqrt(scale)*math.Sqrt(z) + } else { + z = p - math.Sqrt(scale)*math.Sqrt(z) + } + aa = d + z + dd = d - bcmax/z*bcmis + // Compute bb and the rotation matrix. + tau := impl.Dlapy2(c, z) + cs = z / tau + sn = c / tau + bb = b - c + cc = 0 + } else { + // Complex eigenvalues, or real (almost) equal eigenvalues. + // Make diagonal elements equal. + sigma := b + c + tau := impl.Dlapy2(sigma, temp) + cs = math.Sqrt((1 + math.Abs(sigma)/tau) / 2) + sn = -p / (tau * cs) + if sigma < 0 { + sn *= -1 + } + // Compute [ aa bb ] = [ a b ] [ cs -sn ] + // [ cc dd ] [ c d ] [ sn cs ] + aa = a*cs + b*sn + bb = -a*sn + b*cs + cc = c*cs + d*sn + dd = -c*sn + d*cs + // Compute [ a b ] = [ cs sn ] [ aa bb ] + // [ c d ] [-sn cs ] [ cc dd ] + a = aa*cs + cc*sn + b = bb*cs + dd*sn + c = -aa*sn + cc*cs + d = -bb*sn + dd*cs + + temp = (a + d) / 2 + aa = temp + bb = b + cc = c + dd = temp + + if cc != 0 { + if bb != 0 { + if math.Signbit(bb) == math.Signbit(cc) { + // Real eigenvalues, reduce to + // upper triangular form. + sab := math.Sqrt(math.Abs(bb)) + sac := math.Sqrt(math.Abs(cc)) + p = sab * sac + if cc < 0 { + p *= -1 + } + tau = 1 / math.Sqrt(math.Abs(bb+cc)) + aa = temp + p + bb = bb - cc + cc = 0 + dd = temp - p + cs1 := sab * tau + sn1 := sac * tau + cs, sn = cs*cs1-sn*sn1, cs*sn1+sn+cs1 + } + } else { + bb = -cc + cc = 0 + cs, sn = -sn, cs + } + } + } + } + + // Store eigenvalues in (rt1r,rt1i) and (rt2r,rt2i). 
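// Illustrative sketch (not part of the vendored gonum source): a worked
// example of the branches above. The rotation [0 1; -1 0] satisfies
// a == d and signbit(b) != signbit(c), so it is already in standard Schur
// form and is returned unchanged:
//
//	impl := Implementation{}
//	aa, bb, cc, dd, rt1r, rt1i, rt2r, rt2i, cs, sn := impl.Dlanv2(0, 1, -1, 0)
//	// aa, bb, cc, dd = 0, 1, -1, 0 and cs, sn = 1, 0.
//	// Below, rt1i = sqrt(|bb|)*sqrt(|cc|) = 1, so the eigenvalues are
//	// (rt1r, rt1i) = (0, 1) and (rt2r, rt2i) = (0, -1), i.e. ±i.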
+ rt1r = aa + rt2r = dd + if cc != 0 { + rt1i = math.Sqrt(math.Abs(bb)) * math.Sqrt(math.Abs(cc)) + rt2i = -rt1i + } + return +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go new file mode 100644 index 00000000..bf98c338 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go @@ -0,0 +1,55 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas/blas64" + +// Dlapll returns the smallest singular value of the n×2 matrix A = [ x y ]. +// The function first computes the QR factorization of A = Q*R, and then computes +// the SVD of the 2-by-2 upper triangular matrix r. +// +// The contents of x and y are overwritten during the call. +// +// Dlapll is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlapll(n int, x []float64, incX int, y []float64, incY int) float64 { + switch { + case n < 0: + panic(nLT0) + case incX <= 0: + panic(badIncX) + case incY <= 0: + panic(badIncY) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(x) < 1+(n-1)*incX: + panic(shortX) + case len(y) < 1+(n-1)*incY: + panic(shortY) + } + + // Quick return if possible. + if n == 1 { + return 0 + } + + // Compute the QR factorization of the N-by-2 matrix [ X Y ]. + a00, tau := impl.Dlarfg(n, x[0], x[incX:], incX) + x[0] = 1 + + bi := blas64.Implementation() + c := -tau * bi.Ddot(n, x, incX, y, incY) + bi.Daxpy(n, c, x, incX, y, incY) + a11, _ := impl.Dlarfg(n-1, y[incY], y[2*incY:], incY) + + // Compute the SVD of 2-by-2 upper triangular matrix. + ssmin, _ := impl.Dlas2(a00, y[0], a11) + return ssmin +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go new file mode 100644 index 00000000..55f1567f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go @@ -0,0 +1,89 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas/blas64" + +// Dlapmt rearranges the columns of the m×n matrix X as specified by the +// permutation k_0, k_1, ..., k_n-1 of the integers 0, ..., n-1. +// +// If forward is true a forward permutation is performed: +// +// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. +// +// otherwise a backward permutation is performed: +// +// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. +// +// k must have length n, otherwise Dlapmt will panic. k is zero-indexed. +func (impl Implementation) Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case ldx < max(1, n): + panic(badLdX) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + switch { + case len(x) < (m-1)*ldx+n: + panic(shortX) + case len(k) != n: + panic(badLenK) + } + + // Quick return if possible. 
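// Illustrative sketch (not part of the vendored gonum source): a forward
// column permutation with Dlapmt as documented above. The result is worked
// out by hand from the definition that column k[j] of X moves to column j.
//
//	impl := Implementation{}
//	// 2×3 matrix [1 2 3; 4 5 6], ldx = 3.
//	x := []float64{1, 2, 3, 4, 5, 6}
//	k := []int{2, 0, 1}
//	impl.Dlapmt(true, 2, 3, x, 3, k)
//	// x is now [3 1 2; 6 4 5]; k holds {2, 0, 1} again on return.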
+ if n == 1 { + return + } + + for i, v := range k { + v++ + k[i] = -v + } + + bi := blas64.Implementation() + + if forward { + for j, v := range k { + if v >= 0 { + continue + } + k[j] = -v + i := -v - 1 + for k[i] < 0 { + bi.Dswap(m, x[j:], ldx, x[i:], ldx) + + k[i] = -k[i] + j = i + i = k[i] - 1 + } + } + } else { + for i, v := range k { + if v >= 0 { + continue + } + k[i] = -v + j := -v - 1 + for j != i { + bi.Dswap(m, x[j:], ldx, x[i:], ldx) + + k[j] = -k[j] + j = k[j] - 1 + } + } + } + + for i := range k { + k[i]-- + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go new file mode 100644 index 00000000..19f73ffa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go @@ -0,0 +1,14 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlapy2 is the LAPACK version of math.Hypot. +// +// Dlapy2 is an internal routine. It is exported for testing purposes. +func (Implementation) Dlapy2(x, y float64) float64 { + return math.Hypot(x, y) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go new file mode 100644 index 00000000..d3a0def6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go @@ -0,0 +1,127 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqp2 computes a QR factorization with column pivoting of the block A[offset:m, 0:n] +// of the m×n matrix A. The block A[0:offset, 0:n] is accordingly pivoted, but not factorized. +// +// On exit, the upper triangle of block A[offset:m, 0:n] is the triangular factor obtained. +// The elements in block A[offset:m, 0:n] below the diagonal, together with tau, represent +// the orthogonal matrix Q as a product of elementary reflectors. +// +// offset is number of rows of the matrix A that must be pivoted but not factorized. +// offset must not be negative otherwise Dlaqp2 will panic. +// +// On exit, jpvt holds the permutation that was applied; the jth column of A*P was the +// jpvt[j] column of A. jpvt must have length n, otherwise Dlaqp2 will panic. +// +// On exit tau holds the scalar factors of the elementary reflectors. It must have length +// at least min(m-offset, n) otherwise Dlaqp2 will panic. +// +// vn1 and vn2 hold the partial and complete column norms respectively. They must have length n, +// otherwise Dlaqp2 will panic. +// +// work must have length n, otherwise Dlaqp2 will panic. +// +// Dlaqp2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqp2(m, n, offset int, a []float64, lda int, jpvt []int, tau, vn1, vn2, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case offset < 0: + panic(offsetLT0) + case offset > m: + panic(offsetGTM) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
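// Explanatory aside (not part of the vendored gonum source): the partial
// column norm update marked "Lapack Working Note 176" near the end of this
// function relies on the identity that removing the pivot-row entry
// a[offpi,j] from a column of norm vn1[j] leaves a column of norm
//
//	vn1[j] * sqrt(max(0, 1 - (|a[offpi,j]|/vn1[j])^2))
//
// For example, a column of norm 5 whose eliminated entry is 3 is left with
// norm 5*sqrt(1-(3/5)^2) = 4 = sqrt(5^2-3^2). Because this downdate can lose
// accuracy through cancellation, the norm is recomputed from scratch with
// Dnrm2 whenever the loss indicator temp2 drops below tol3z.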
+ if m == 0 || n == 0 { + return + } + + mn := min(m-offset, n) + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(jpvt) != n: + panic(badLenJpvt) + case len(tau) < mn: + panic(shortTau) + case len(vn1) < n: + panic(shortVn1) + case len(vn2) < n: + panic(shortVn2) + case len(work) < n: + panic(shortWork) + } + + tol3z := math.Sqrt(dlamchE) + + bi := blas64.Implementation() + + // Compute factorization. + for i := 0; i < mn; i++ { + offpi := offset + i + + // Determine ith pivot column and swap if necessary. + p := i + bi.Idamax(n-i, vn1[i:], 1) + if p != i { + bi.Dswap(m, a[p:], lda, a[i:], lda) + jpvt[p], jpvt[i] = jpvt[i], jpvt[p] + vn1[p] = vn1[i] + vn2[p] = vn2[i] + } + + // Generate elementary reflector H_i. + if offpi < m-1 { + a[offpi*lda+i], tau[i] = impl.Dlarfg(m-offpi, a[offpi*lda+i], a[(offpi+1)*lda+i:], lda) + } else { + tau[i] = 0 + } + + if i < n-1 { + // Apply H_i^T to A[offset+i:m, i:n] from the left. + aii := a[offpi*lda+i] + a[offpi*lda+i] = 1 + impl.Dlarf(blas.Left, m-offpi, n-i-1, a[offpi*lda+i:], lda, tau[i], a[offpi*lda+i+1:], lda, work) + a[offpi*lda+i] = aii + } + + // Update partial column norms. + for j := i + 1; j < n; j++ { + if vn1[j] == 0 { + continue + } + + // The following marked lines follow from the + // analysis in Lapack Working Note 176. + r := math.Abs(a[offpi*lda+j]) / vn1[j] // * + temp := math.Max(0, 1-r*r) // * + r = vn1[j] / vn2[j] // * + temp2 := temp * r * r // * + if temp2 < tol3z { + var v float64 + if offpi < m-1 { + v = bi.Dnrm2(m-offpi-1, a[(offpi+1)*lda+j:], lda) + } + vn1[j] = v + vn2[j] = v + } else { + vn1[j] *= math.Sqrt(temp) // * + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go new file mode 100644 index 00000000..dd683b62 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go @@ -0,0 +1,244 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqps computes a step of QR factorization with column pivoting +// of an m×n matrix A by using Blas-3. It tries to factorize nb +// columns from A starting from the row offset, and updates all +// of the matrix with Dgemm. +// +// In some cases, due to catastrophic cancellations, it cannot +// factorize nb columns. Hence, the actual number of factorized +// columns is returned in kb. +// +// Dlaqps computes a QR factorization with column pivoting of the +// block A[offset:m, 0:nb] of the m×n matrix A. The block +// A[0:offset, 0:n] is accordingly pivoted, but not factorized. +// +// On exit, the upper triangle of block A[offset:m, 0:kb] is the +// triangular factor obtained. The elements in block A[offset:m, 0:n] +// below the diagonal, together with tau, represent the orthogonal +// matrix Q as a product of elementary reflectors. +// +// offset is number of rows of the matrix A that must be pivoted but +// not factorized. offset must not be negative otherwise Dlaqps will panic. +// +// On exit, jpvt holds the permutation that was applied; the jth column +// of A*P was the jpvt[j] column of A. jpvt must have length n, +// otherwise Dlapqs will panic. +// +// On exit tau holds the scalar factors of the elementary reflectors. +// It must have length nb, otherwise Dlapqs will panic. +// +// vn1 and vn2 hold the partial and complete column norms respectively. 
+// They must have length n, otherwise Dlapqs will panic. +// +// auxv must have length nb, otherwise Dlaqps will panic. +// +// f and ldf represent an n×nb matrix F that is overwritten during the +// call. +// +// Dlaqps is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqps(m, n, offset, nb int, a []float64, lda int, jpvt []int, tau, vn1, vn2, auxv, f []float64, ldf int) (kb int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case offset < 0: + panic(offsetLT0) + case offset > m: + panic(offsetGTM) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case lda < max(1, n): + panic(badLdA) + case ldf < max(1, nb): + panic(badLdF) + } + + if m == 0 || n == 0 { + return 0 + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(jpvt) != n: + panic(badLenJpvt) + case len(vn1) < n: + panic(shortVn1) + case len(vn2) < n: + panic(shortVn2) + } + + if nb == 0 { + return 0 + } + + switch { + case len(tau) < nb: + panic(shortTau) + case len(auxv) < nb: + panic(shortAuxv) + case len(f) < (n-1)*ldf+nb: + panic(shortF) + } + + if offset == m { + return 0 + } + + lastrk := min(m, n+offset) + lsticc := -1 + tol3z := math.Sqrt(dlamchE) + + bi := blas64.Implementation() + + var k, rk int + for ; k < nb && lsticc == -1; k++ { + rk = offset + k + + // Determine kth pivot column and swap if necessary. + p := k + bi.Idamax(n-k, vn1[k:], 1) + if p != k { + bi.Dswap(m, a[p:], lda, a[k:], lda) + bi.Dswap(k, f[p*ldf:], 1, f[k*ldf:], 1) + jpvt[p], jpvt[k] = jpvt[k], jpvt[p] + vn1[p] = vn1[k] + vn2[p] = vn2[k] + } + + // Apply previous Householder reflectors to column K: + // + // A[rk:m, k] = A[rk:m, k] - A[rk:m, 0:k-1]*F[k, 0:k-1]^T. + if k > 0 { + bi.Dgemv(blas.NoTrans, m-rk, k, -1, + a[rk*lda:], lda, + f[k*ldf:], 1, + 1, + a[rk*lda+k:], lda) + } + + // Generate elementary reflector H_k. + if rk < m-1 { + a[rk*lda+k], tau[k] = impl.Dlarfg(m-rk, a[rk*lda+k], a[(rk+1)*lda+k:], lda) + } else { + tau[k] = 0 + } + + akk := a[rk*lda+k] + a[rk*lda+k] = 1 + + // Compute kth column of F: + // + // Compute F[k+1:n, k] = tau[k]*A[rk:m, k+1:n]^T*A[rk:m, k]. + if k < n-1 { + bi.Dgemv(blas.Trans, m-rk, n-k-1, tau[k], + a[rk*lda+k+1:], lda, + a[rk*lda+k:], lda, + 0, + f[(k+1)*ldf+k:], ldf) + } + + // Padding F[0:k, k] with zeros. + for j := 0; j < k; j++ { + f[j*ldf+k] = 0 + } + + // Incremental updating of F: + // + // F[0:n, k] := F[0:n, k] - tau[k]*F[0:n, 0:k-1]*A[rk:m, 0:k-1]^T*A[rk:m,k]. + if k > 0 { + bi.Dgemv(blas.Trans, m-rk, k, -tau[k], + a[rk*lda:], lda, + a[rk*lda+k:], lda, + 0, + auxv, 1) + bi.Dgemv(blas.NoTrans, n, k, 1, + f, ldf, + auxv, 1, + 1, + f[k:], ldf) + } + + // Update the current row of A: + // + // A[rk, k+1:n] = A[rk, k+1:n] - A[rk, 0:k]*F[k+1:n, 0:k]^T. + if k < n-1 { + bi.Dgemv(blas.NoTrans, n-k-1, k+1, -1, + f[(k+1)*ldf:], ldf, + a[rk*lda:], 1, + 1, + a[rk*lda+k+1:], 1) + } + + // Update partial column norms. + if rk < lastrk-1 { + for j := k + 1; j < n; j++ { + if vn1[j] == 0 { + continue + } + + // The following marked lines follow from the + // analysis in Lapack Working Note 176. + r := math.Abs(a[rk*lda+j]) / vn1[j] // * + temp := math.Max(0, 1-r*r) // * + r = vn1[j] / vn2[j] // * + temp2 := temp * r * r // * + if temp2 < tol3z { + // vn2 is used here as a collection of + // indices into vn2 and also a collection + // of column norms. 
+ vn2[j] = float64(lsticc) + lsticc = j + } else { + vn1[j] *= math.Sqrt(temp) // * + } + } + } + + a[rk*lda+k] = akk + } + kb = k + rk = offset + kb + + // Apply the block reflector to the rest of the matrix: + // + // A[offset+kb+1:m, kb+1:n] := A[offset+kb+1:m, kb+1:n] - A[offset+kb+1:m, 1:kb]*F[kb+1:n, 1:kb]^T. + if kb < min(n, m-offset) { + bi.Dgemm(blas.NoTrans, blas.Trans, + m-rk, n-kb, kb, -1, + a[rk*lda:], lda, + f[kb*ldf:], ldf, + 1, + a[rk*lda+kb:], lda) + } + + // Recomputation of difficult columns. + for lsticc >= 0 { + itemp := int(vn2[lsticc]) + + // NOTE: The computation of vn1[lsticc] relies on the fact that + // Dnrm2 does not fail on vectors with norm below the value of + // sqrt(dlamchS) + v := bi.Dnrm2(m-rk, a[rk*lda+lsticc:], lda) + vn1[lsticc] = v + vn2[lsticc] = v + + lsticc = itemp + } + + return kb +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go new file mode 100644 index 00000000..e9fbb600 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go @@ -0,0 +1,478 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" +) + +// Dlaqr04 computes the eigenvalues of a block of an n×n upper Hessenberg matrix +// H, and optionally the matrices T and Z from the Schur decomposition +// H = Z T Z^T +// where T is an upper quasi-triangular matrix (the Schur form), and Z is the +// orthogonal matrix of Schur vectors. +// +// wantt indicates whether the full Schur form T is required. If wantt is false, +// then only enough of H will be updated to preserve the eigenvalues. +// +// wantz indicates whether the n×n matrix of Schur vectors Z is required. If it +// is true, the orthogonal similarity transformation will be accumulated into +// Z[iloz:ihiz+1,ilo:ihi+1], otherwise Z will not be referenced. +// +// ilo and ihi determine the block of H on which Dlaqr04 operates. It must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0, +// and the block must be isolated, that is, +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// otherwise Dlaqr04 will panic. +// +// wr and wi must have length ihi+1. +// +// iloz and ihiz specify the rows of Z to which transformations will be applied +// if wantz is true. It must hold that +// 0 <= iloz <= ilo, and ihi <= ihiz < n, +// otherwise Dlaqr04 will panic. +// +// work must have length at least lwork and lwork must be +// lwork >= 1, if n <= 11, +// lwork >= n, if n > 11, +// otherwise Dlaqr04 will panic. lwork as large as 6*n may be required for +// optimal performance. On return, work[0] will contain the optimal value of +// lwork. +// +// If lwork is -1, instead of performing Dlaqr04, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// recur is the non-negative recursion depth. For recur > 0, Dlaqr04 behaves +// as DLAQR0, for recur == 0 it behaves as DLAQR4. +// +// unconverged indicates whether Dlaqr04 computed all the eigenvalues of H[ilo:ihi+1,ilo:ihi+1]. +// +// If unconverged is zero and wantt is true, H will contain on return the upper +// quasi-triangular matrix T from the Schur decomposition. 
2×2 diagonal blocks +// (corresponding to complex conjugate pairs of eigenvalues) will be returned in +// standard form, with H[i,i] == H[i+1,i+1] and H[i+1,i]*H[i,i+1] < 0. +// +// If unconverged is zero and if wantt is false, the contents of h on return is +// unspecified. +// +// If unconverged is zero, all the eigenvalues have been computed and their real +// and imaginary parts will be stored on return in wr[ilo:ihi+1] and +// wi[ilo:ihi+1], respectively. If two eigenvalues are computed as a complex +// conjugate pair, they are stored in consecutive elements of wr and wi, say the +// i-th and (i+1)th, with wi[i] > 0 and wi[i+1] < 0. If wantt is true, then the +// eigenvalues are stored in the same order as on the diagonal of the Schur form +// returned in H, with wr[i] = H[i,i] and, if H[i:i+2,i:i+2] is a 2×2 diagonal +// block, wi[i] = sqrt(-H[i+1,i]*H[i,i+1]) and wi[i+1] = -wi[i]. +// +// If unconverged is positive, some eigenvalues have not converged, and +// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] will contain those +// eigenvalues which have been successfully computed. Failures are rare. +// +// If unconverged is positive and wantt is true, then on return +// (initial H)*U = U*(final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged is positive and wantt is false, on return the remaining +// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix +// H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged is positive and wantz is true, then on return +// (final Z) = (initial Z)*U, +// where U is the orthogonal matrix in (*) regardless of the value of wantt. +// +// References: +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// Dlaqr04 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqr04(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int, work []float64, lwork int, recur int) (unconverged int) { + const ( + // Matrices of order ntiny or smaller must be processed by + // Dlahqr because of insufficient subdiagonal scratch space. + // This is a hard limit. + ntiny = 11 + // Exceptional deflation windows: try to cure rare slow + // convergence by varying the size of the deflation window after + // kexnw iterations. + kexnw = 5 + // Exceptional shifts: try to cure rare slow convergence with + // ad-hoc exceptional shifts every kexsh iterations. + kexsh = 6 + + // See https://github.com/gonum/lapack/pull/151#discussion_r68162802 + // and the surrounding discussion for an explanation where these + // constants come from. + // TODO(vladimir-ch): Similar constants for exceptional shifts + // are used also in dlahqr.go. The first constant is different + // there, it is equal to 3. Why? And does it matter? 
+ wilk1 = 0.75 + wilk2 = -0.4375 + ) + + switch { + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case ldh < max(1, n): + panic(badLdH) + case wantz && (iloz < 0 || ilo < iloz): + panic(badIloz) + case wantz && (ihiz < ihi || n <= ihiz): + panic(badIhiz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case lwork < 1 && lwork != -1: + panic(badLWork) + // TODO(vladimir-ch): Enable if and when we figure out what the minimum + // necessary lwork value is. Dlaqr04 says that the minimum is n which + // clashes with Dlaqr23's opinion about optimal work when nw <= 2 + // (independent of n). + // case lwork < n && n > ntiny && lwork != -1: + // panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + case recur < 0: + panic(recurLT0) + } + + // Quick return. + if n == 0 { + work[0] = 1 + return 0 + } + + if lwork != -1 { + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case len(wr) != ihi+1: + panic(badLenWr) + case len(wi) != ihi+1: + panic(badLenWi) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case ilo > 0 && h[ilo*ldh+ilo-1] != 0: + panic(notIsolated) + case ihi+1 < n && h[(ihi+1)*ldh+ihi] != 0: + panic(notIsolated) + } + } + + if n <= ntiny { + // Tiny matrices must use Dlahqr. + if lwork == -1 { + work[0] = 1 + return 0 + } + return impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz) + } + + // Use small bulge multi-shift QR with aggressive early deflation on + // larger-than-tiny matrices. + var jbcmpz string + if wantt { + jbcmpz = "S" + } else { + jbcmpz = "E" + } + if wantz { + jbcmpz += "V" + } else { + jbcmpz += "N" + } + + var fname string + if recur > 0 { + fname = "DLAQR0" + } else { + fname = "DLAQR4" + } + // nwr is the recommended deflation window size. n is greater than 11, + // so there is enough subdiagonal workspace for nwr >= 2 as required. + // (In fact, there is enough subdiagonal space for nwr >= 3.) + // TODO(vladimir-ch): If there is enough space for nwr >= 3, should we + // use it? + nwr := impl.Ilaenv(13, fname, jbcmpz, n, ilo, ihi, lwork) + nwr = max(2, nwr) + nwr = min(ihi-ilo+1, min((n-1)/3, nwr)) + + // nsr is the recommended number of simultaneous shifts. n is greater + // than 11, so there is enough subdiagonal workspace for nsr to be even + // and greater than or equal to two as required. + nsr := impl.Ilaenv(15, fname, jbcmpz, n, ilo, ihi, lwork) + nsr = min(nsr, min((n+6)/9, ihi-ilo)) + nsr = max(2, nsr&^1) + + // Workspace query call to Dlaqr23. + impl.Dlaqr23(wantt, wantz, n, ilo, ihi, nwr+1, h, ldh, iloz, ihiz, z, ldz, + wr, wi, h, ldh, n, h, ldh, n, h, ldh, work, -1, recur) + // Optimal workspace is max(Dlaqr5, Dlaqr23). + lwkopt := max(3*nsr/2, int(work[0])) + // Quick return in case of workspace query. + if lwork == -1 { + work[0] = float64(lwkopt) + return 0 + } + + // Dlahqr/Dlaqr04 crossover point. + nmin := impl.Ilaenv(12, fname, jbcmpz, n, ilo, ihi, lwork) + nmin = max(ntiny, nmin) + + // Nibble determines when to skip a multi-shift QR sweep (Dlaqr5). + nibble := impl.Ilaenv(14, fname, jbcmpz, n, ilo, ihi, lwork) + nibble = max(0, nibble) + + // Computation mode of far-from-diagonal orthogonal updates in Dlaqr5. + kacc22 := impl.Ilaenv(16, fname, jbcmpz, n, ilo, ihi, lwork) + kacc22 = max(0, min(kacc22, 2)) + + // nwmax is the largest possible deflation window for which there is + // sufficient workspace. + nwmax := min((n-1)/3, lwork/2) + nw := nwmax // Start with maximum deflation window size. 
+ + // nsmax is the largest number of simultaneous shifts for which there is + // sufficient workspace. + nsmax := min((n+6)/9, 2*lwork/3) &^ 1 + + ndfl := 1 // Number of iterations since last deflation. + ndec := 0 // Deflation window size decrement. + + // Main loop. + var ( + itmax = max(30, 2*kexsh) * max(10, (ihi-ilo+1)) + it = 0 + ) + for kbot := ihi; kbot >= ilo; { + if it == itmax { + unconverged = kbot + 1 + break + } + it++ + + // Locate active block. + ktop := ilo + for k := kbot; k >= ilo+1; k-- { + if h[k*ldh+k-1] == 0 { + ktop = k + break + } + } + + // Select deflation window size nw. + // + // Typical Case: + // If possible and advisable, nibble the entire active block. + // If not, use size min(nwr,nwmax) or min(nwr+1,nwmax) + // depending upon which has the smaller corresponding + // subdiagonal entry (a heuristic). + // + // Exceptional Case: + // If there have been no deflations in kexnw or more + // iterations, then vary the deflation window size. At first, + // because larger windows are, in general, more powerful than + // smaller ones, rapidly increase the window to the maximum + // possible. Then, gradually reduce the window size. + nh := kbot - ktop + 1 + nwupbd := min(nh, nwmax) + if ndfl < kexnw { + nw = min(nwupbd, nwr) + } else { + nw = min(nwupbd, 2*nw) + } + if nw < nwmax { + if nw >= nh-1 { + nw = nh + } else { + kwtop := kbot - nw + 1 + if math.Abs(h[kwtop*ldh+kwtop-1]) > math.Abs(h[(kwtop-1)*ldh+kwtop-2]) { + nw++ + } + } + } + if ndfl < kexnw { + ndec = -1 + } else if ndec >= 0 || nw >= nwupbd { + ndec++ + if nw-ndec < 2 { + ndec = 0 + } + nw -= ndec + } + + // Split workspace under the subdiagonal of H into: + // - an nw×nw work array V in the lower left-hand corner, + // - an nw×nhv horizontal work array along the bottom edge (nhv + // must be at least nw but more is better), + // - an nve×nw vertical work array along the left-hand-edge + // (nhv can be any positive integer but more is better). + kv := n - nw + kt := nw + kwv := nw + 1 + nhv := n - kwv - kt + // Aggressive early deflation. + ls, ld := impl.Dlaqr23(wantt, wantz, n, ktop, kbot, nw, + h, ldh, iloz, ihiz, z, ldz, wr[:kbot+1], wi[:kbot+1], + h[kv*ldh:], ldh, nhv, h[kv*ldh+kt:], ldh, nhv, h[kwv*ldh:], ldh, work, lwork, recur) + + // Adjust kbot accounting for new deflations. + kbot -= ld + // ks points to the shifts. + ks := kbot - ls + 1 + + // Skip an expensive QR sweep if there is a (partly heuristic) + // reason to expect that many eigenvalues will deflate without + // it. Here, the QR sweep is skipped if many eigenvalues have + // just been deflated or if the remaining active block is small. + if ld > 0 && (100*ld > nw*nibble || kbot-ktop+1 <= min(nmin, nwmax)) { + // ld is positive, note progress. + ndfl = 1 + continue + } + + // ns is the nominal number of simultaneous shifts. This may be + // lowered (slightly) if Dlaqr23 did not provide that many + // shifts. + ns := min(min(nsmax, nsr), max(2, kbot-ktop)) &^ 1 + + // If there have been no deflations in a multiple of kexsh + // iterations, then try exceptional shifts. Otherwise use shifts + // provided by Dlaqr23 above or from the eigenvalues of a + // trailing principal submatrix. 
+ if ndfl%kexsh == 0 { + ks = kbot - ns + 1 + for i := kbot; i > max(ks, ktop+1); i -= 2 { + ss := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) + aa := wilk1*ss + h[i*ldh+i] + _, _, _, _, wr[i-1], wi[i-1], wr[i], wi[i], _, _ = + impl.Dlanv2(aa, ss, wilk2*ss, aa) + } + if ks == ktop { + wr[ks+1] = h[(ks+1)*ldh+ks+1] + wi[ks+1] = 0 + wr[ks] = wr[ks+1] + wi[ks] = wi[ks+1] + } + } else { + // If we got ns/2 or fewer shifts, use Dlahqr or recur + // into Dlaqr04 on a trailing principal submatrix to get + // more. Since ns <= nsmax <=(n+6)/9, there is enough + // space below the subdiagonal to fit an ns×ns scratch + // array. + if kbot-ks+1 <= ns/2 { + ks = kbot - ns + 1 + kt = n - ns + impl.Dlacpy(blas.All, ns, ns, h[ks*ldh+ks:], ldh, h[kt*ldh:], ldh) + if ns > nmin && recur > 0 { + ks += impl.Dlaqr04(false, false, ns, 1, ns-1, h[kt*ldh:], ldh, + wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 0, work, lwork, recur-1) + } else { + ks += impl.Dlahqr(false, false, ns, 0, ns-1, h[kt*ldh:], ldh, + wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 1) + } + // In case of a rare QR failure use eigenvalues + // of the trailing 2×2 principal submatrix. + if ks >= kbot { + aa := h[(kbot-1)*ldh+kbot-1] + bb := h[(kbot-1)*ldh+kbot] + cc := h[kbot*ldh+kbot-1] + dd := h[kbot*ldh+kbot] + _, _, _, _, wr[kbot-1], wi[kbot-1], wr[kbot], wi[kbot], _, _ = + impl.Dlanv2(aa, bb, cc, dd) + ks = kbot - 1 + } + } + + if kbot-ks+1 > ns { + // Sorting the shifts helps a little. Bubble + // sort keeps complex conjugate pairs together. + sorted := false + for k := kbot; k > ks; k-- { + if sorted { + break + } + sorted = true + for i := ks; i < k; i++ { + if math.Abs(wr[i])+math.Abs(wi[i]) >= math.Abs(wr[i+1])+math.Abs(wi[i+1]) { + continue + } + sorted = false + wr[i], wr[i+1] = wr[i+1], wr[i] + wi[i], wi[i+1] = wi[i+1], wi[i] + } + } + } + + // Shuffle shifts into pairs of real shifts and pairs of + // complex conjugate shifts using the fact that complex + // conjugate shifts are already adjacent to one another. + // TODO(vladimir-ch): The shuffling here could probably + // be removed but I'm not sure right now and it's safer + // to leave it. + for i := kbot; i > ks+1; i -= 2 { + if wi[i] == -wi[i-1] { + continue + } + wr[i], wr[i-1], wr[i-2] = wr[i-1], wr[i-2], wr[i] + wi[i], wi[i-1], wi[i-2] = wi[i-1], wi[i-2], wi[i] + } + } + + // If there are only two shifts and both are real, then use only one. + if kbot-ks+1 == 2 && wi[kbot] == 0 { + if math.Abs(wr[kbot]-h[kbot*ldh+kbot]) < math.Abs(wr[kbot-1]-h[kbot*ldh+kbot]) { + wr[kbot-1] = wr[kbot] + } else { + wr[kbot] = wr[kbot-1] + } + } + + // Use up to ns of the smallest magnitude shifts. If there + // aren't ns shifts available, then use them all, possibly + // dropping one to make the number of shifts even. + ns = min(ns, kbot-ks+1) &^ 1 + ks = kbot - ns + 1 + + // Split workspace under the subdiagonal into: + // - a kdu×kdu work array U in the lower left-hand-corner, + // - a kdu×nhv horizontal work array WH along the bottom edge + // (nhv must be at least kdu but more is better), + // - an nhv×kdu vertical work array WV along the left-hand-edge + // (nhv must be at least kdu but more is better). + kdu := 3*ns - 3 + ku := n - kdu + kwh := kdu + kwv = kdu + 3 + nhv = n - kwv - kdu + // Small-bulge multi-shift QR sweep. + impl.Dlaqr5(wantt, wantz, kacc22, n, ktop, kbot, ns, + wr[ks:ks+ns], wi[ks:ks+ns], h, ldh, iloz, ihiz, z, ldz, + work, 3, h[ku*ldh:], ldh, nhv, h[kwv*ldh:], ldh, nhv, h[ku*ldh+kwh:], ldh) + + // Note progress (or the lack of it). 
+ if ld > 0 { + ndfl = 1 + } else { + ndfl++ + } + } + + work[0] = float64(lwkopt) + return unconverged +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go new file mode 100644 index 00000000..e21373bd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go @@ -0,0 +1,59 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaqr1 sets v to a scalar multiple of the first column of the product +// (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) +// where H is a 2×2 or 3×3 matrix, I is the identity matrix of the same size, +// and i is the imaginary unit. Scaling is done to avoid overflows and most +// underflows. +// +// n is the order of H and must be either 2 or 3. It must hold that either sr1 = +// sr2 and si1 = -si2, or si1 = si2 = 0. The length of v must be equal to n. If +// any of these conditions is not met, Dlaqr1 will panic. +// +// Dlaqr1 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqr1(n int, h []float64, ldh int, sr1, si1, sr2, si2 float64, v []float64) { + switch { + case n != 2 && n != 3: + panic("lapack: n must be 2 or 3") + case ldh < n: + panic(badLdH) + case len(h) < (n-1)*ldh+n: + panic(shortH) + case !((sr1 == sr2 && si1 == -si2) || (si1 == 0 && si2 == 0)): + panic(badShifts) + case len(v) != n: + panic(shortV) + } + + if n == 2 { + s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) + if s == 0 { + v[0] = 0 + v[1] = 0 + } else { + h21s := h[ldh] / s + v[0] = h21s*h[1] + (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) + v[1] = h21s * (h[0] + h[ldh+1] - sr1 - sr2) + } + return + } + + s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) + math.Abs(h[2*ldh]) + if s == 0 { + v[0] = 0 + v[1] = 0 + v[2] = 0 + } else { + h21s := h[ldh] / s + h31s := h[2*ldh] / s + v[0] = (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) + h[1]*h21s + h[2]*h31s + v[1] = h21s*(h[0]+h[ldh+1]-sr1-sr2) + h[ldh+2]*h31s + v[2] = h31s*(h[0]+h[2*ldh+2]-sr1-sr2) + h21s*h[2*ldh+1] + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go new file mode 100644 index 00000000..ff299a73 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go @@ -0,0 +1,415 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlaqr23 performs the orthogonal similarity transformation of an n×n upper +// Hessenberg matrix to detect and deflate fully converged eigenvalues from a +// trailing principal submatrix using aggressive early deflation [1]. +// +// On return, H will be overwritten by a new Hessenberg matrix that is a +// perturbation of an orthogonal similarity transformation of H. It is hoped +// that on output H will have many zero subdiagonal entries. +// +// If wantt is true, the matrix H will be fully updated so that the +// quasi-triangular Schur factor can be computed. If wantt is false, then only +// enough of H will be updated to preserve the eigenvalues. +// +// If wantz is true, the orthogonal similarity transformation will be +// accumulated into Z[iloz:ihiz+1,ktop:kbot+1], otherwise Z is not referenced. 
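// Illustrative sketch (not part of the vendored gonum source): a worked
// Dlaqr1 call, referring to the routine defined above. For the 2×2 matrix
// H = [1 2; 3 4] and the double real shift sr1 = sr2 = 0, si1 = si2 = 0,
// the first column of H*H is [7, 15]^T, and Dlaqr1 returns it scaled by
// 1/s with s = |h00 - sr2| + |si2| + |h10| = 4:
//
//	impl := Implementation{}
//	v := make([]float64, 2)
//	impl.Dlaqr1(2, []float64{1, 2, 3, 4}, 2, 0, 0, 0, 0, v)
//	// v = [1.75, 3.75], i.e. (1/4) * [7, 15].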
+// +// ktop and kbot determine a block [ktop:kbot+1,ktop:kbot+1] along the diagonal +// of H. It must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0, +// and the block must be isolated, that is, it must hold that +// ktop == 0 or H[ktop,ktop-1] == 0, +// kbot == n-1 or H[kbot+1,kbot] == 0, +// otherwise Dlaqr23 will panic. +// +// nw is the deflation window size. It must hold that +// 0 <= nw <= kbot-ktop+1, +// otherwise Dlaqr23 will panic. +// +// iloz and ihiz specify the rows of the n×n matrix Z to which transformations +// will be applied if wantz is true. It must hold that +// 0 <= iloz <= ktop, and kbot <= ihiz < n, +// otherwise Dlaqr23 will panic. +// +// sr and si must have length kbot+1, otherwise Dlaqr23 will panic. +// +// v and ldv represent an nw×nw work matrix. +// t and ldt represent an nw×nh work matrix, and nh must be at least nw. +// wv and ldwv represent an nv×nw work matrix. +// +// work must have length at least lwork and lwork must be at least max(1,2*nw), +// otherwise Dlaqr23 will panic. Larger values of lwork may result in greater +// efficiency. On return, work[0] will contain the optimal value of lwork. +// +// If lwork is -1, instead of performing Dlaqr23, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// recur is the non-negative recursion depth. For recur > 0, Dlaqr23 behaves +// as DLAQR3, for recur == 0 it behaves as DLAQR2. +// +// On return, ns and nd will contain respectively the number of unconverged +// (i.e., approximate) eigenvalues and converged eigenvalues that are stored in +// sr and si. +// +// On return, the real and imaginary parts of approximate eigenvalues that may +// be used for shifts will be stored respectively in sr[kbot-nd-ns+1:kbot-nd+1] +// and si[kbot-nd-ns+1:kbot-nd+1]. +// +// On return, the real and imaginary parts of converged eigenvalues will be +// stored respectively in sr[kbot-nd+1:kbot+1] and si[kbot-nd+1:kbot+1]. +// +// References: +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +func (impl Implementation) Dlaqr23(wantt, wantz bool, n, ktop, kbot, nw int, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, sr, si []float64, v []float64, ldv int, nh int, t []float64, ldt int, nv int, wv []float64, ldwv int, work []float64, lwork int, recur int) (ns, nd int) { + switch { + case n < 0: + panic(nLT0) + case ktop < 0 || max(0, n-1) < ktop: + panic(badKtop) + case kbot < min(ktop, n-1) || n <= kbot: + panic(badKbot) + case nw < 0 || kbot-ktop+1+1 < nw: + panic(badNw) + case ldh < max(1, n): + panic(badLdH) + case wantz && (iloz < 0 || ktop < iloz): + panic(badIloz) + case wantz && (ihiz < kbot || n <= ihiz): + panic(badIhiz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case ldv < max(1, nw): + panic(badLdV) + case nh < nw: + panic(badNh) + case ldt < max(1, nh): + panic(badLdT) + case nv < 0: + panic(nvLT0) + case ldwv < max(1, nw): + panic(badLdWV) + case lwork < max(1, 2*nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + case recur < 0: + panic(recurLT0) + } + + // Quick return for zero window size. + if nw == 0 { + work[0] = 1 + return 0, 0 + } + + // LAPACK code does not enforce the documented behavior + // nw <= kbot-ktop+1 + // but we do (we panic above). 
+ jw := nw + lwkopt := max(1, 2*nw) + if jw > 2 { + // Workspace query call to Dgehrd. + impl.Dgehrd(jw, 0, jw-2, t, ldt, work, work, -1) + lwk1 := int(work[0]) + // Workspace query call to Dormhr. + impl.Dormhr(blas.Right, blas.NoTrans, jw, jw, 0, jw-2, t, ldt, work, v, ldv, work, -1) + lwk2 := int(work[0]) + if recur > 0 { + // Workspace query call to Dlaqr04. + impl.Dlaqr04(true, true, jw, 0, jw-1, t, ldt, sr, si, 0, jw-1, v, ldv, work, -1, recur-1) + lwk3 := int(work[0]) + // Optimal workspace. + lwkopt = max(jw+max(lwk1, lwk2), lwk3) + } else { + // Optimal workspace. + lwkopt = jw + max(lwk1, lwk2) + } + } + // Quick return in case of workspace query. + if lwork == -1 { + work[0] = float64(lwkopt) + return 0, 0 + } + + // Check input slices only if not doing workspace query. + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case len(v) < (nw-1)*ldv+nw: + panic(shortV) + case len(t) < (nw-1)*ldt+nh: + panic(shortT) + case len(wv) < (nv-1)*ldwv+nw: + panic(shortWV) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case len(sr) != kbot+1: + panic(badLenSr) + case len(si) != kbot+1: + panic(badLenSi) + case ktop > 0 && h[ktop*ldh+ktop-1] != 0: + panic(notIsolated) + case kbot+1 < n && h[(kbot+1)*ldh+kbot] != 0: + panic(notIsolated) + } + + // Machine constants. + ulp := dlamchP + smlnum := float64(n) / ulp * dlamchS + + // Setup deflation window. + var s float64 + kwtop := kbot - jw + 1 + if kwtop != ktop { + s = h[kwtop*ldh+kwtop-1] + } + if kwtop == kbot { + // 1×1 deflation window. + sr[kwtop] = h[kwtop*ldh+kwtop] + si[kwtop] = 0 + ns = 1 + nd = 0 + if math.Abs(s) <= math.Max(smlnum, ulp*math.Abs(h[kwtop*ldh+kwtop])) { + ns = 0 + nd = 1 + if kwtop > ktop { + h[kwtop*ldh+kwtop-1] = 0 + } + } + work[0] = 1 + return ns, nd + } + + // Convert to spike-triangular form. In case of a rare QR failure, this + // routine continues to do aggressive early deflation using that part of + // the deflation window that converged using infqr here and there to + // keep track. + impl.Dlacpy(blas.Upper, jw, jw, h[kwtop*ldh+kwtop:], ldh, t, ldt) + bi := blas64.Implementation() + bi.Dcopy(jw-1, h[(kwtop+1)*ldh+kwtop:], ldh+1, t[ldt:], ldt+1) + impl.Dlaset(blas.All, jw, jw, 0, 1, v, ldv) + nmin := impl.Ilaenv(12, "DLAQR3", "SV", jw, 0, jw-1, lwork) + var infqr int + if recur > 0 && jw > nmin { + infqr = impl.Dlaqr04(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv, work, lwork, recur-1) + } else { + infqr = impl.Dlahqr(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv) + } + // Note that ilo == 0 which conveniently coincides with the success + // value of infqr, that is, infqr as an index always points to the first + // converged eigenvalue. + + // Dtrexc needs a clean margin near the diagonal. + for j := 0; j < jw-3; j++ { + t[(j+2)*ldt+j] = 0 + t[(j+3)*ldt+j] = 0 + } + if jw >= 3 { + t[(jw-1)*ldt+jw-3] = 0 + } + + ns = jw + ilst := infqr + // Deflation detection loop. + for ilst < ns { + bulge := false + if ns >= 2 { + bulge = t[(ns-1)*ldt+ns-2] != 0 + } + if !bulge { + // Real eigenvalue. + abst := math.Abs(t[(ns-1)*ldt+ns-1]) + if abst == 0 { + abst = math.Abs(s) + } + if math.Abs(s*v[ns-1]) <= math.Max(smlnum, ulp*abst) { + // Deflatable. + ns-- + } else { + // Undeflatable, move it up out of the way. + // Dtrexc can not fail in this case. + _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) + ilst++ + } + continue + } + // Complex conjugate pair. 
+ abst := math.Abs(t[(ns-1)*ldt+ns-1]) + math.Sqrt(math.Abs(t[(ns-1)*ldt+ns-2]))*math.Sqrt(math.Abs(t[(ns-2)*ldt+ns-1])) + if abst == 0 { + abst = math.Abs(s) + } + if math.Max(math.Abs(s*v[ns-1]), math.Abs(s*v[ns-2])) <= math.Max(smlnum, ulp*abst) { + // Deflatable. + ns -= 2 + } else { + // Undeflatable, move them up out of the way. + // Dtrexc does the right thing with ilst in case of a + // rare exchange failure. + _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) + ilst += 2 + } + } + + // Return to Hessenberg form. + if ns == 0 { + s = 0 + } + if ns < jw { + // Sorting diagonal blocks of T improves accuracy for graded + // matrices. Bubble sort deals well with exchange failures. + sorted := false + i := ns + for !sorted { + sorted = true + kend := i - 1 + i = infqr + var k int + if i == ns-1 || t[(i+1)*ldt+i] == 0 { + k = i + 1 + } else { + k = i + 2 + } + for k <= kend { + var evi float64 + if k == i+1 { + evi = math.Abs(t[i*ldt+i]) + } else { + evi = math.Abs(t[i*ldt+i]) + math.Sqrt(math.Abs(t[(i+1)*ldt+i]))*math.Sqrt(math.Abs(t[i*ldt+i+1])) + } + + var evk float64 + if k == kend || t[(k+1)*ldt+k] == 0 { + evk = math.Abs(t[k*ldt+k]) + } else { + evk = math.Abs(t[k*ldt+k]) + math.Sqrt(math.Abs(t[(k+1)*ldt+k]))*math.Sqrt(math.Abs(t[k*ldt+k+1])) + } + + if evi >= evk { + i = k + } else { + sorted = false + _, ilst, ok := impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, i, k, work) + if ok { + i = ilst + } else { + i = k + } + } + if i == kend || t[(i+1)*ldt+i] == 0 { + k = i + 1 + } else { + k = i + 2 + } + } + } + } + + // Restore shift/eigenvalue array from T. + for i := jw - 1; i >= infqr; { + if i == infqr || t[i*ldt+i-1] == 0 { + sr[kwtop+i] = t[i*ldt+i] + si[kwtop+i] = 0 + i-- + continue + } + aa := t[(i-1)*ldt+i-1] + bb := t[(i-1)*ldt+i] + cc := t[i*ldt+i-1] + dd := t[i*ldt+i] + _, _, _, _, sr[kwtop+i-1], si[kwtop+i-1], sr[kwtop+i], si[kwtop+i], _, _ = impl.Dlanv2(aa, bb, cc, dd) + i -= 2 + } + + if ns < jw || s == 0 { + if ns > 1 && s != 0 { + // Reflect spike back into lower triangle. + bi.Dcopy(ns, v[:ns], 1, work[:ns], 1) + _, tau := impl.Dlarfg(ns, work[0], work[1:ns], 1) + work[0] = 1 + impl.Dlaset(blas.Lower, jw-2, jw-2, 0, 0, t[2*ldt:], ldt) + impl.Dlarf(blas.Left, ns, jw, work[:ns], 1, tau, t, ldt, work[jw:]) + impl.Dlarf(blas.Right, ns, ns, work[:ns], 1, tau, t, ldt, work[jw:]) + impl.Dlarf(blas.Right, jw, ns, work[:ns], 1, tau, v, ldv, work[jw:]) + impl.Dgehrd(jw, 0, ns-1, t, ldt, work[:jw-1], work[jw:], lwork-jw) + } + + // Copy updated reduced window into place. + if kwtop > 0 { + h[kwtop*ldh+kwtop-1] = s * v[0] + } + impl.Dlacpy(blas.Upper, jw, jw, t, ldt, h[kwtop*ldh+kwtop:], ldh) + bi.Dcopy(jw-1, t[ldt:], ldt+1, h[(kwtop+1)*ldh+kwtop:], ldh+1) + + // Accumulate orthogonal matrix in order to update H and Z, if + // requested. + if ns > 1 && s != 0 { + // work[:ns-1] contains the elementary reflectors stored + // by a call to Dgehrd above. + impl.Dormhr(blas.Right, blas.NoTrans, jw, ns, 0, ns-1, + t, ldt, work[:ns-1], v, ldv, work[jw:], lwork-jw) + } + + // Update vertical slab in H. + var ltop int + if !wantt { + ltop = ktop + } + for krow := ltop; krow < kwtop; krow += nv { + kln := min(nv, kwtop-krow) + bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, + 1, h[krow*ldh+kwtop:], ldh, v, ldv, + 0, wv, ldwv) + impl.Dlacpy(blas.All, kln, jw, wv, ldwv, h[krow*ldh+kwtop:], ldh) + } + + // Update horizontal slab in H. 
+ if wantt { + for kcol := kbot + 1; kcol < n; kcol += nh { + kln := min(nh, n-kcol) + bi.Dgemm(blas.Trans, blas.NoTrans, jw, kln, jw, + 1, v, ldv, h[kwtop*ldh+kcol:], ldh, + 0, t, ldt) + impl.Dlacpy(blas.All, jw, kln, t, ldt, h[kwtop*ldh+kcol:], ldh) + } + } + + // Update vertical slab in Z. + if wantz { + for krow := iloz; krow <= ihiz; krow += nv { + kln := min(nv, ihiz-krow+1) + bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, + 1, z[krow*ldz+kwtop:], ldz, v, ldv, + 0, wv, ldwv) + impl.Dlacpy(blas.All, kln, jw, wv, ldwv, z[krow*ldz+kwtop:], ldz) + } + } + } + + // The number of deflations. + nd = jw - ns + // Shifts are converged eigenvalues that could not be deflated. + // Subtracting infqr from the spike length takes care of the case of a + // rare QR failure while calculating eigenvalues of the deflation + // window. + ns -= infqr + work[0] = float64(lwkopt) + return ns, nd +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go new file mode 100644 index 00000000..c198f229 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go @@ -0,0 +1,644 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqr5 performs a single small-bulge multi-shift QR sweep on an isolated +// block of a Hessenberg matrix. +// +// wantt and wantz determine whether the quasi-triangular Schur factor and the +// orthogonal Schur factor, respectively, will be computed. +// +// kacc22 specifies the computation mode of far-from-diagonal orthogonal +// updates. Permitted values are: +// 0: Dlaqr5 will not accumulate reflections and will not use matrix-matrix +// multiply to update far-from-diagonal matrix entries. +// 1: Dlaqr5 will accumulate reflections and use matrix-matrix multiply to +// update far-from-diagonal matrix entries. +// 2: Dlaqr5 will accumulate reflections, use matrix-matrix multiply to update +// far-from-diagonal matrix entries, and take advantage of 2×2 block +// structure during matrix multiplies. +// For other values of kacc2 Dlaqr5 will panic. +// +// n is the order of the Hessenberg matrix H. +// +// ktop and kbot are indices of the first and last row and column of an isolated +// diagonal block upon which the QR sweep will be applied. It must hold that +// ktop == 0, or 0 < ktop <= n-1 and H[ktop, ktop-1] == 0, and +// kbot == n-1, or 0 <= kbot < n-1 and H[kbot+1, kbot] == 0, +// otherwise Dlaqr5 will panic. +// +// nshfts is the number of simultaneous shifts. It must be positive and even, +// otherwise Dlaqr5 will panic. +// +// sr and si contain the real and imaginary parts, respectively, of the shifts +// of origin that define the multi-shift QR sweep. On return both slices may be +// reordered by Dlaqr5. Their length must be equal to nshfts, otherwise Dlaqr5 +// will panic. +// +// h and ldh represent the Hessenberg matrix H of size n×n. On return +// multi-shift QR sweep with shifts sr+i*si has been applied to the isolated +// diagonal block in rows and columns ktop through kbot, inclusive. +// +// iloz and ihiz specify the rows of Z to which transformations will be applied +// if wantz is true. It must hold that 0 <= iloz <= ihiz < n, otherwise Dlaqr5 +// will panic. +// +// z and ldz represent the matrix Z of size n×n. 
If wantz is true, the QR sweep +// orthogonal similarity transformation is accumulated into +// z[iloz:ihiz,iloz:ihiz] from the right, otherwise z not referenced. +// +// v and ldv represent an auxiliary matrix V of size (nshfts/2)×3. Note that V +// is transposed with respect to the reference netlib implementation. +// +// u and ldu represent an auxiliary matrix of size (3*nshfts-3)×(3*nshfts-3). +// +// wh and ldwh represent an auxiliary matrix of size (3*nshfts-3)×nh. +// +// wv and ldwv represent an auxiliary matrix of size nv×(3*nshfts-3). +// +// Dlaqr5 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqr5(wantt, wantz bool, kacc22 int, n, ktop, kbot, nshfts int, sr, si []float64, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, v []float64, ldv int, u []float64, ldu int, nv int, wv []float64, ldwv int, nh int, wh []float64, ldwh int) { + switch { + case kacc22 != 0 && kacc22 != 1 && kacc22 != 2: + panic(badKacc22) + case n < 0: + panic(nLT0) + case ktop < 0 || n <= ktop: + panic(badKtop) + case kbot < 0 || n <= kbot: + panic(badKbot) + + case nshfts < 0: + panic(nshftsLT0) + case nshfts&0x1 != 0: + panic(nshftsOdd) + case len(sr) != nshfts: + panic(badLenSr) + case len(si) != nshfts: + panic(badLenSi) + + case ldh < max(1, n): + panic(badLdH) + case len(h) < (n-1)*ldh+n: + panic(shortH) + + case wantz && ihiz >= n: + panic(badIhiz) + case wantz && iloz < 0 || ihiz < iloz: + panic(badIloz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + + case ldv < 3: + // V is transposed w.r.t. reference lapack. + panic(badLdV) + case len(v) < (nshfts/2-1)*ldv+3: + panic(shortV) + + case ldu < max(1, 3*nshfts-3): + panic(badLdU) + case len(u) < (3*nshfts-3-1)*ldu+3*nshfts-3: + panic(shortU) + + case nv < 0: + panic(nvLT0) + case ldwv < max(1, 3*nshfts-3): + panic(badLdWV) + case len(wv) < (nv-1)*ldwv+3*nshfts-3: + panic(shortWV) + + case nh < 0: + panic(nhLT0) + case ldwh < max(1, nh): + panic(badLdWH) + case len(wh) < (3*nshfts-3-1)*ldwh+nh: + panic(shortWH) + + case ktop > 0 && h[ktop*ldh+ktop-1] != 0: + panic(notIsolated) + case kbot < n-1 && h[(kbot+1)*ldh+kbot] != 0: + panic(notIsolated) + } + + // If there are no shifts, then there is nothing to do. + if nshfts < 2 { + return + } + // If the active block is empty or 1×1, then there is nothing to do. + if ktop >= kbot { + return + } + + // Shuffle shifts into pairs of real shifts and pairs of complex + // conjugate shifts assuming complex conjugate shifts are already + // adjacent to one another. + for i := 0; i < nshfts-2; i += 2 { + if si[i] == -si[i+1] { + continue + } + sr[i], sr[i+1], sr[i+2] = sr[i+1], sr[i+2], sr[i] + si[i], si[i+1], si[i+2] = si[i+1], si[i+2], si[i] + } + + // Note: lapack says that nshfts must be even but allows it to be odd + // anyway. We panic above if nshfts is not even, so reducing it by one + // is unnecessary. The only caller Dlaqr04 uses only even nshfts. + // + // The original comment and code from lapack-3.6.0/SRC/dlaqr5.f:341: + // * ==== NSHFTS is supposed to be even, but if it is odd, + // * . then simply reduce it by one. The shuffle above + // * . ensures that the dropped shift is real and that + // * . the remaining shifts are paired. ==== + // * + // NS = NSHFTS - MOD( NSHFTS, 2 ) + ns := nshfts + + safmin := dlamchS + ulp := dlamchP + smlnum := safmin * float64(n) / ulp + + // Use accumulated reflections to update far-from-diagonal entries? 
+ accum := kacc22 == 1 || kacc22 == 2 + // If so, exploit the 2×2 block structure? + blk22 := ns > 2 && kacc22 == 2 + + // Clear trash. + if ktop+2 <= kbot { + h[(ktop+2)*ldh+ktop] = 0 + } + + // nbmps = number of 2-shift bulges in the chain. + nbmps := ns / 2 + + // kdu = width of slab. + kdu := 6*nbmps - 3 + + // Create and chase chains of nbmps bulges. + for incol := 3*(1-nbmps) + ktop - 1; incol <= kbot-2; incol += 3*nbmps - 2 { + ndcol := incol + kdu + if accum { + impl.Dlaset(blas.All, kdu, kdu, 0, 1, u, ldu) + } + + // Near-the-diagonal bulge chase. The following loop performs + // the near-the-diagonal part of a small bulge multi-shift QR + // sweep. Each 6*nbmps-2 column diagonal chunk extends from + // column incol to column ndcol (including both column incol and + // column ndcol). The following loop chases a 3*nbmps column + // long chain of nbmps bulges 3*nbmps-2 columns to the right. + // (incol may be less than ktop and ndcol may be greater than + // kbot indicating phantom columns from which to chase bulges + // before they are actually introduced or to which to chase + // bulges beyond column kbot.) + for krcol := incol; krcol <= min(incol+3*nbmps-3, kbot-2); krcol++ { + // Bulges number mtop to mbot are active double implicit + // shift bulges. There may or may not also be small 2×2 + // bulge, if there is room. The inactive bulges (if any) + // must wait until the active bulges have moved down the + // diagonal to make room. The phantom matrix paradigm + // described above helps keep track. + + mtop := max(0, ((ktop-1)-krcol+2)/3) + mbot := min(nbmps, (kbot-krcol)/3) - 1 + m22 := mbot + 1 + bmp22 := (mbot < nbmps-1) && (krcol+3*m22 == kbot-2) + + // Generate reflections to chase the chain right one + // column. (The minimum value of k is ktop-1.) + for m := mtop; m <= mbot; m++ { + k := krcol + 3*m + if k == ktop-1 { + impl.Dlaqr1(3, h[ktop*ldh+ktop:], ldh, + sr[2*m], si[2*m], sr[2*m+1], si[2*m+1], + v[m*ldv:m*ldv+3]) + alpha := v[m*ldv] + _, v[m*ldv] = impl.Dlarfg(3, alpha, v[m*ldv+1:m*ldv+3], 1) + continue + } + beta := h[(k+1)*ldh+k] + v[m*ldv+1] = h[(k+2)*ldh+k] + v[m*ldv+2] = h[(k+3)*ldh+k] + beta, v[m*ldv] = impl.Dlarfg(3, beta, v[m*ldv+1:m*ldv+3], 1) + + // A bulge may collapse because of vigilant deflation or + // destructive underflow. In the underflow case, try the + // two-small-subdiagonals trick to try to reinflate the + // bulge. + if h[(k+3)*ldh+k] != 0 || h[(k+3)*ldh+k+1] != 0 || h[(k+3)*ldh+k+2] == 0 { + // Typical case: not collapsed (yet). + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + continue + } + + // Atypical case: collapsed. Attempt to reintroduce + // ignoring H[k+1,k] and H[k+2,k]. If the fill + // resulting from the new reflector is too large, + // then abandon it. Otherwise, use the new one. + var vt [3]float64 + impl.Dlaqr1(3, h[(k+1)*ldh+k+1:], ldh, sr[2*m], + si[2*m], sr[2*m+1], si[2*m+1], vt[:]) + alpha := vt[0] + _, vt[0] = impl.Dlarfg(3, alpha, vt[1:3], 1) + refsum := vt[0] * (h[(k+1)*ldh+k] + vt[1]*h[(k+2)*ldh+k]) + + dsum := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) + math.Abs(h[(k+2)*ldh+k+2]) + if math.Abs(h[(k+2)*ldh+k]-refsum*vt[1])+math.Abs(refsum*vt[2]) > ulp*dsum { + // Starting a new bulge here would create + // non-negligible fill. Use the old one with + // trepidation. + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + continue + } else { + // Starting a new bulge here would create + // only negligible fill. Replace the old + // reflector with the new one. 
+ h[(k+1)*ldh+k] -= refsum + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + v[m*ldv] = vt[0] + v[m*ldv+1] = vt[1] + v[m*ldv+2] = vt[2] + } + } + + // Generate a 2×2 reflection, if needed. + if bmp22 { + k := krcol + 3*m22 + if k == ktop-1 { + impl.Dlaqr1(2, h[(k+1)*ldh+k+1:], ldh, + sr[2*m22], si[2*m22], sr[2*m22+1], si[2*m22+1], + v[m22*ldv:m22*ldv+2]) + beta := v[m22*ldv] + _, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) + } else { + beta := h[(k+1)*ldh+k] + v[m22*ldv+1] = h[(k+2)*ldh+k] + beta, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + } + } + + // Multiply H by reflections from the left. + var jbot int + switch { + case accum: + jbot = min(ndcol, kbot) + case wantt: + jbot = n - 1 + default: + jbot = kbot + } + for j := max(ktop, krcol); j <= jbot; j++ { + mend := min(mbot+1, (j-krcol+2)/3) - 1 + for m := mtop; m <= mend; m++ { + k := krcol + 3*m + refsum := v[m*ldv] * (h[(k+1)*ldh+j] + + v[m*ldv+1]*h[(k+2)*ldh+j] + v[m*ldv+2]*h[(k+3)*ldh+j]) + h[(k+1)*ldh+j] -= refsum + h[(k+2)*ldh+j] -= refsum * v[m*ldv+1] + h[(k+3)*ldh+j] -= refsum * v[m*ldv+2] + } + } + if bmp22 { + k := krcol + 3*m22 + for j := max(k+1, ktop); j <= jbot; j++ { + refsum := v[m22*ldv] * (h[(k+1)*ldh+j] + v[m22*ldv+1]*h[(k+2)*ldh+j]) + h[(k+1)*ldh+j] -= refsum + h[(k+2)*ldh+j] -= refsum * v[m22*ldv+1] + } + } + + // Multiply H by reflections from the right. Delay filling in the last row + // until the vigilant deflation check is complete. + var jtop int + switch { + case accum: + jtop = max(ktop, incol) + case wantt: + jtop = 0 + default: + jtop = ktop + } + for m := mtop; m <= mbot; m++ { + if v[m*ldv] == 0 { + continue + } + k := krcol + 3*m + for j := jtop; j <= min(kbot, k+3); j++ { + refsum := v[m*ldv] * (h[j*ldh+k+1] + + v[m*ldv+1]*h[j*ldh+k+2] + v[m*ldv+2]*h[j*ldh+k+3]) + h[j*ldh+k+1] -= refsum + h[j*ldh+k+2] -= refsum * v[m*ldv+1] + h[j*ldh+k+3] -= refsum * v[m*ldv+2] + } + if accum { + // Accumulate U. (If necessary, update Z later with an + // efficient matrix-matrix multiply.) + kms := k - incol + for j := max(0, ktop-incol-1); j < kdu; j++ { + refsum := v[m*ldv] * (u[j*ldu+kms] + + v[m*ldv+1]*u[j*ldu+kms+1] + v[m*ldv+2]*u[j*ldu+kms+2]) + u[j*ldu+kms] -= refsum + u[j*ldu+kms+1] -= refsum * v[m*ldv+1] + u[j*ldu+kms+2] -= refsum * v[m*ldv+2] + } + } else if wantz { + // U is not accumulated, so update Z now by multiplying by + // reflections from the right. + for j := iloz; j <= ihiz; j++ { + refsum := v[m*ldv] * (z[j*ldz+k+1] + + v[m*ldv+1]*z[j*ldz+k+2] + v[m*ldv+2]*z[j*ldz+k+3]) + z[j*ldz+k+1] -= refsum + z[j*ldz+k+2] -= refsum * v[m*ldv+1] + z[j*ldz+k+3] -= refsum * v[m*ldv+2] + } + } + } + + // Special case: 2×2 reflection (if needed). + if bmp22 && v[m22*ldv] != 0 { + k := krcol + 3*m22 + for j := jtop; j <= min(kbot, k+3); j++ { + refsum := v[m22*ldv] * (h[j*ldh+k+1] + v[m22*ldv+1]*h[j*ldh+k+2]) + h[j*ldh+k+1] -= refsum + h[j*ldh+k+2] -= refsum * v[m22*ldv+1] + } + if accum { + kms := k - incol + for j := max(0, ktop-incol-1); j < kdu; j++ { + refsum := v[m22*ldv] * (u[j*ldu+kms] + v[m22*ldv+1]*u[j*ldu+kms+1]) + u[j*ldu+kms] -= refsum + u[j*ldu+kms+1] -= refsum * v[m22*ldv+1] + } + } else if wantz { + for j := iloz; j <= ihiz; j++ { + refsum := v[m22*ldv] * (z[j*ldz+k+1] + v[m22*ldv+1]*z[j*ldz+k+2]) + z[j*ldz+k+1] -= refsum + z[j*ldz+k+2] -= refsum * v[m22*ldv+1] + } + } + } + + // Vigilant deflation check. 
+ mstart := mtop + if krcol+3*mstart < ktop { + mstart++ + } + mend := mbot + if bmp22 { + mend++ + } + if krcol == kbot-2 { + mend++ + } + for m := mstart; m <= mend; m++ { + k := min(kbot-1, krcol+3*m) + + // The following convergence test requires that the tradition + // small-compared-to-nearby-diagonals criterion and the Ahues & + // Tisseur (LAWN 122, 1997) criteria both be satisfied. The latter + // improves accuracy in some examples. Falling back on an alternate + // convergence criterion when tst1 or tst2 is zero (as done here) is + // traditional but probably unnecessary. + + if h[(k+1)*ldh+k] == 0 { + continue + } + tst1 := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) + if tst1 == 0 { + if k >= ktop+1 { + tst1 += math.Abs(h[k*ldh+k-1]) + } + if k >= ktop+2 { + tst1 += math.Abs(h[k*ldh+k-2]) + } + if k >= ktop+3 { + tst1 += math.Abs(h[k*ldh+k-3]) + } + if k <= kbot-2 { + tst1 += math.Abs(h[(k+2)*ldh+k+1]) + } + if k <= kbot-3 { + tst1 += math.Abs(h[(k+3)*ldh+k+1]) + } + if k <= kbot-4 { + tst1 += math.Abs(h[(k+4)*ldh+k+1]) + } + } + if math.Abs(h[(k+1)*ldh+k]) <= math.Max(smlnum, ulp*tst1) { + h12 := math.Max(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) + h21 := math.Min(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) + h11 := math.Max(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) + h22 := math.Min(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) + scl := h11 + h12 + tst2 := h22 * (h11 / scl) + if tst2 == 0 || h21*(h12/scl) <= math.Max(smlnum, ulp*tst2) { + h[(k+1)*ldh+k] = 0 + } + } + } + + // Fill in the last row of each bulge. + mend = min(nbmps, (kbot-krcol-1)/3) - 1 + for m := mtop; m <= mend; m++ { + k := krcol + 3*m + refsum := v[m*ldv] * v[m*ldv+2] * h[(k+4)*ldh+k+3] + h[(k+4)*ldh+k+1] = -refsum + h[(k+4)*ldh+k+2] = -refsum * v[m*ldv+1] + h[(k+4)*ldh+k+3] -= refsum * v[m*ldv+2] + } + } + + // Use U (if accumulated) to update far-from-diagonal entries in H. + // If required, use U to update Z as well. + if !accum { + continue + } + var jtop, jbot int + if wantt { + jtop = 0 + jbot = n - 1 + } else { + jtop = ktop + jbot = kbot + } + bi := blas64.Implementation() + if !blk22 || incol < ktop || kbot < ndcol || ns <= 2 { + // Updates not exploiting the 2×2 block structure of U. k0 and nu keep track + // of the location and size of U in the special cases of introducing bulges + // and chasing bulges off the bottom. In these special cases and in case the + // number of shifts is ns = 2, there is no 2×2 block structure to exploit. + + k0 := max(0, ktop-incol-1) + nu := kdu - max(0, ndcol-kbot) - k0 + + // Horizontal multiply. + for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { + jlen := min(nh, jbot-jcol+1) + bi.Dgemm(blas.Trans, blas.NoTrans, nu, jlen, nu, + 1, u[k0*ldu+k0:], ldu, + h[(incol+k0+1)*ldh+jcol:], ldh, + 0, wh, ldwh) + impl.Dlacpy(blas.All, nu, jlen, wh, ldwh, h[(incol+k0+1)*ldh+jcol:], ldh) + } + + // Vertical multiply. + for jrow := jtop; jrow <= max(ktop, incol)-1; jrow += nv { + jlen := min(nv, max(ktop, incol)-jrow) + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, + 1, h[jrow*ldh+incol+k0+1:], ldh, + u[k0*ldu+k0:], ldu, + 0, wv, ldwv) + impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, h[jrow*ldh+incol+k0+1:], ldh) + } + + // Z multiply (also vertical). 
+ if wantz { + for jrow := iloz; jrow <= ihiz; jrow += nv { + jlen := min(nv, ihiz-jrow+1) + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, + 1, z[jrow*ldz+incol+k0+1:], ldz, + u[k0*ldu+k0:], ldu, + 0, wv, ldwv) + impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, z[jrow*ldz+incol+k0+1:], ldz) + } + } + + continue + } + + // Updates exploiting U's 2×2 block structure. + + // i2, i4, j2, j4 are the last rows and columns of the blocks. + i2 := (kdu + 1) / 2 + i4 := kdu + j2 := i4 - i2 + j4 := kdu + + // kzs and knz deal with the band of zeros along the diagonal of one of the + // triangular blocks. + kzs := (j4 - j2) - (ns + 1) + knz := ns + 1 + + // Horizontal multiply. + for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { + jlen := min(nh, jbot-jcol+1) + + // Copy bottom of H to top+kzs of scratch (the first kzs + // rows get multiplied by zero). + impl.Dlacpy(blas.All, knz, jlen, h[(incol+1+j2)*ldh+jcol:], ldh, wh[kzs*ldwh:], ldwh) + + // Multiply by U21^T. + impl.Dlaset(blas.All, kzs, jlen, 0, 0, wh, ldwh) + bi.Dtrmm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, knz, jlen, + 1, u[j2*ldu+kzs:], ldu, wh[kzs*ldwh:], ldwh) + + // Multiply top of H by U11^T. + bi.Dgemm(blas.Trans, blas.NoTrans, i2, jlen, j2, + 1, u, ldu, h[(incol+1)*ldh+jcol:], ldh, + 1, wh, ldwh) + + // Copy top of H to bottom of WH. + impl.Dlacpy(blas.All, j2, jlen, h[(incol+1)*ldh+jcol:], ldh, wh[i2*ldwh:], ldwh) + + // Multiply by U21^T. + bi.Dtrmm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, j2, jlen, + 1, u[i2:], ldu, wh[i2*ldwh:], ldwh) + + // Multiply by U22. + bi.Dgemm(blas.Trans, blas.NoTrans, i4-i2, jlen, j4-j2, + 1, u[j2*ldu+i2:], ldu, h[(incol+1+j2)*ldh+jcol:], ldh, + 1, wh[i2*ldwh:], ldwh) + + // Copy it back. + impl.Dlacpy(blas.All, kdu, jlen, wh, ldwh, h[(incol+1)*ldh+jcol:], ldh) + } + + // Vertical multiply. + for jrow := jtop; jrow <= max(incol, ktop)-1; jrow += nv { + jlen := min(nv, max(incol, ktop)-jrow) + + // Copy right of H to scratch (the first kzs columns get multiplied + // by zero). + impl.Dlacpy(blas.All, jlen, knz, h[jrow*ldh+incol+1+j2:], ldh, wv[kzs:], ldwv) + + // Multiply by U21. + impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, + 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) + + // Multiply by U11. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, + 1, h[jrow*ldh+incol+1:], ldh, u, ldu, + 1, wv, ldwv) + + // Copy left of H to right of scratch. + impl.Dlacpy(blas.All, jlen, j2, h[jrow*ldh+incol+1:], ldh, wv[i2:], ldwv) + + // Multiply by U21. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, + 1, u[i2:], ldu, wv[i2:], ldwv) + + // Multiply by U22. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, + 1, h[jrow*ldh+incol+1+j2:], ldh, u[j2*ldu+i2:], ldu, + 1, wv[i2:], ldwv) + + // Copy it back. + impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, h[jrow*ldh+incol+1:], ldh) + } + + if !wantz { + continue + } + // Multiply Z (also vertical). + for jrow := iloz; jrow <= ihiz; jrow += nv { + jlen := min(nv, ihiz-jrow+1) + + // Copy right of Z to left of scratch (first kzs columns get + // multiplied by zero). + impl.Dlacpy(blas.All, jlen, knz, z[jrow*ldz+incol+1+j2:], ldz, wv[kzs:], ldwv) + + // Multiply by U12. + impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, + 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) + + // Multiply by U11. 
+ bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, + 1, z[jrow*ldz+incol+1:], ldz, u, ldu, + 1, wv, ldwv) + + // Copy left of Z to right of scratch. + impl.Dlacpy(blas.All, jlen, j2, z[jrow*ldz+incol+1:], ldz, wv[i2:], ldwv) + + // Multiply by U21. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, + 1, u[i2:], ldu, wv[i2:], ldwv) + + // Multiply by U22. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, + 1, z[jrow*ldz+incol+1+j2:], ldz, u[j2*ldu+i2:], ldu, + 1, wv[i2:], ldwv) + + // Copy the result back to Z. + impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, z[jrow*ldz+incol+1:], ldz) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go new file mode 100644 index 00000000..9fc97a32 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go @@ -0,0 +1,101 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlarf applies an elementary reflector to a general rectangular matrix c. +// This computes +// c = h * c if side == Left +// c = c * h if side == right +// where +// h = 1 - tau * v * v^T +// and c is an m * n matrix. +// +// work is temporary storage of length at least m if side == Left and at least +// n if side == Right. This function will panic if this length requirement is not met. +// +// Dlarf is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlarf(side blas.Side, m, n int, v []float64, incv int, tau float64, c []float64, ldc int, work []float64) { + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case incv == 0: + panic(zeroIncV) + case ldc < max(1, n): + panic(badLdC) + } + + if m == 0 || n == 0 { + return + } + + applyleft := side == blas.Left + lenV := n + if applyleft { + lenV = m + } + + switch { + case len(v) < 1+(lenV-1)*abs(incv): + panic(shortV) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case (applyleft && len(work) < n) || (!applyleft && len(work) < m): + panic(shortWork) + } + + lastv := 0 // last non-zero element of v + lastc := 0 // last non-zero row/column of c + if tau != 0 { + var i int + if applyleft { + lastv = m - 1 + } else { + lastv = n - 1 + } + if incv > 0 { + i = lastv * incv + } + + // Look for the last non-zero row in v. + for lastv >= 0 && v[i] == 0 { + lastv-- + i -= incv + } + if applyleft { + // Scan for the last non-zero column in C[0:lastv, :] + lastc = impl.Iladlc(lastv+1, n, c, ldc) + } else { + // Scan for the last non-zero row in C[:, 0:lastv] + lastc = impl.Iladlr(m, lastv+1, c, ldc) + } + } + if lastv == -1 || lastc == -1 { + return + } + // Sometimes 1-indexing is nicer ... + bi := blas64.Implementation() + if applyleft { + // Form H * C + // w[0:lastc+1] = c[1:lastv+1, 1:lastc+1]^T * v[1:lastv+1,1] + bi.Dgemv(blas.Trans, lastv+1, lastc+1, 1, c, ldc, v, incv, 0, work, 1) + // c[0: lastv, 0: lastc] = c[...] - w[0:lastv, 1] * v[1:lastc, 1]^T + bi.Dger(lastv+1, lastc+1, -tau, v, incv, work, 1, c, ldc) + return + } + // Form C*H + // w[0:lastc+1,1] := c[0:lastc+1,0:lastv+1] * v[0:lastv+1,1] + bi.Dgemv(blas.NoTrans, lastc+1, lastv+1, 1, c, ldc, v, incv, 0, work, 1) + // c[0:lastc+1,0:lastv+1] = c[...] 
- w[0:lastc+1,0] * v[0:lastv+1,0]^T + bi.Dger(lastc+1, lastv+1, -tau, work, 1, v, incv, c, ldc) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go new file mode 100644 index 00000000..4dd8e063 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go @@ -0,0 +1,449 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlarfb applies a block reflector to a matrix. +// +// In the call to Dlarfb, the mxn c is multiplied by the implicitly defined matrix h as follows: +// c = h * c if side == Left and trans == NoTrans +// c = c * h if side == Right and trans == NoTrans +// c = h^T * c if side == Left and trans == Trans +// c = c * h^T if side == Right and trans == Trans +// h is a product of elementary reflectors. direct sets the direction of multiplication +// h = h_1 * h_2 * ... * h_k if direct == Forward +// h = h_k * h_k-1 * ... * h_1 if direct == Backward +// The combination of direct and store defines the orientation of the elementary +// reflectors. In all cases the ones on the diagonal are implicitly represented. +// +// If direct == lapack.Forward and store == lapack.ColumnWise +// V = [ 1 ] +// [v1 1 ] +// [v1 v2 1] +// [v1 v2 v3] +// [v1 v2 v3] +// If direct == lapack.Forward and store == lapack.RowWise +// V = [ 1 v1 v1 v1 v1] +// [ 1 v2 v2 v2] +// [ 1 v3 v3] +// If direct == lapack.Backward and store == lapack.ColumnWise +// V = [v1 v2 v3] +// [v1 v2 v3] +// [ 1 v2 v3] +// [ 1 v3] +// [ 1] +// If direct == lapack.Backward and store == lapack.RowWise +// V = [v1 v1 1 ] +// [v2 v2 v2 1 ] +// [v3 v3 v3 v3 1] +// An elementary reflector can be explicitly constructed by extracting the +// corresponding elements of v, placing a 1 where the diagonal would be, and +// placing zeros in the remaining elements. +// +// t is a k×k matrix containing the block reflector, and this function will panic +// if t is not of sufficient size. See Dlarft for more information. +// +// work is a temporary storage matrix with stride ldwork. +// work must be of size at least n×k side == Left and m×k if side == Right, and +// this function will panic if this size is not met. +// +// Dlarfb is an internal routine. It is exported for testing purposes. 
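+//
+// As an illustrative sketch only (the surrounding variable names are assumed,
+// not prescribed), applying a forward, column-wise stored block reflector from
+// the left has the call shape
+//	impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Forward, lapack.ColumnWise,
+//		m, n, k, v, ldv, t, ldt, c, ldc, work, ldwork)
+// with work of size at least n×k and ldwork >= k.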
+func (Implementation) Dlarfb(side blas.Side, trans blas.Transpose, direct lapack.Direct, store lapack.StoreV, m, n, k int, v []float64, ldv int, t []float64, ldt int, c []float64, ldc int, work []float64, ldwork int) { + nv := m + if side == blas.Right { + nv = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case direct != lapack.Forward && direct != lapack.Backward: + panic(badDirect) + case store != lapack.ColumnWise && store != lapack.RowWise: + panic(badStoreV) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case store == lapack.ColumnWise && ldv < max(1, k): + panic(badLdV) + case store == lapack.RowWise && ldv < max(1, nv): + panic(badLdV) + case ldt < max(1, k): + panic(badLdT) + case ldc < max(1, n): + panic(badLdC) + case ldwork < max(1, k): + panic(badLdWork) + } + + if m == 0 || n == 0 { + return + } + + nw := n + if side == blas.Right { + nw = m + } + switch { + case store == lapack.ColumnWise && len(v) < (nv-1)*ldv+k: + panic(shortV) + case store == lapack.RowWise && len(v) < (k-1)*ldv+nv: + panic(shortV) + case len(t) < (k-1)*ldt+k: + panic(shortT) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(work) < (nw-1)*ldwork+k: + panic(shortWork) + } + + bi := blas64.Implementation() + + transt := blas.Trans + if trans == blas.Trans { + transt = blas.NoTrans + } + // TODO(btracey): This follows the original Lapack code where the + // elements are copied into the columns of the working array. The + // loops should go in the other direction so the data is written + // into the rows of work so the copy is not strided. A bigger change + // would be to replace work with work^T, but benchmarks would be + // needed to see if the change is merited. + if store == lapack.ColumnWise { + if direct == lapack.Forward { + // V1 is the first k rows of C. V2 is the remaining rows. + if side == blas.Left { + // W = C^T V = C1^T V1 + C2^T V2 (stored in work). + + // W = C1. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) + } + // W = W * V1. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, + n, k, 1, + v, ldv, + work, ldwork) + if m > k { + // W = W + C2^T V2. + bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, + 1, c[k*ldc:], ldc, v[k*ldv:], ldv, + 1, work, ldwork) + } + // W = W * T^T or W * T. + bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V * W^T. + if m > k { + // C2 -= V2 * W^T. + bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, + -1, v[k*ldv:], ldv, work, ldwork, + 1, c[k*ldc:], ldc) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + // C1 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[j*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C = C * H or C * H^T, where C = (C1 C2). + + // W = C1. + for i := 0; i < k; i++ { + bi.Dcopy(m, c[i:], ldc, work[i:], ldwork) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, + 1, c[k:], ldc, v[k*ldv:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. 
+ bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, + -1, work, ldwork, v[k*ldv:], ldv, + 1, c[k:], ldc) + } + // C -= W * V^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + // C -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+j] -= work[i*ldwork+j] + } + } + return + } + // V = (V1) + // = (V2) (last k rows) + // Where V2 is unit upper triangular. + if side == blas.Left { + // Form H * C or + // W = C^T V. + + // W = C2^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, + 1, v[(m-k)*ldv:], ldv, + work, ldwork) + if m > k { + // W += C1^T * V1. + bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V * W^T. + if m > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, + -1, v, ldv, work, ldwork, + 1, c, ldc) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, + 1, v[(m-k)*ldv:], ldv, + work, ldwork) + // C2 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[(m-k+j)*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V. + + // W = C2. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) + } + + // W = W * V2. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, + 1, v[(n-k)*ldv:], ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V^T. + if n > k { + // C1 -= W * V1^T. + bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, + -1, work, ldwork, v, ldv, + 1, c, ldc) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, + 1, v[(n-k)*ldv:], ldv, + work, ldwork) + // C2 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+n-k+j] -= work[i*ldwork+j] + } + } + return + } + // Store = Rowwise. + if direct == lapack.Forward { + // V = (V1 V2) where v1 is unit upper triangular. + if side == blas.Left { + // Form H * C or H^T * C where C = (C1; C2). + // W = C^T * V^T. + + // W = C1^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, + 1, c[k*ldc:], ldc, v[k:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V^T * W^T. + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, + -1, v[k:], ldv, work, ldwork, + 1, c[k*ldc:], ldc) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + // C1 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[j*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). 
+ // W = C * V^T. + + // W = C1. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[j:], ldc, work[j:], ldwork) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, + 1, c[k:], ldc, v[k:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V. + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, + -1, work, ldwork, v[k:], ldv, + 1, c[k:], ldc) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + // C1 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+j] -= work[i*ldwork+j] + } + } + return + } + // V = (V1 V2) where V2 is the last k columns and is lower unit triangular. + if side == blas.Left { + // Form H * C or H^T C where C = (C1 ; C2). + // W = C^T * V^T. + + // W = C2^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, + 1, v[m-k:], ldv, + work, ldwork) + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V^T * W^T. + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, + -1, v, ldv, work, ldwork, + 1, c, ldc) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, k, + 1, v[m-k:], ldv, + work, ldwork) + // C2 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[(m-k+j)*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V^T. + // W = C2. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, + 1, v[n-k:], ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V. + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, + -1, work, ldwork, v, ldv, + 1, c, ldc) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, + 1, v[n-k:], ldv, + work, ldwork) + // C1 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+n-k+j] -= work[i*ldwork+j] + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go new file mode 100644 index 00000000..e037fdd6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go @@ -0,0 +1,71 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlarfg generates an elementary reflector for a Householder matrix. It creates +// a real elementary reflector of order n such that +// H * (alpha) = (beta) +// ( x) ( 0) +// H^T * H = I +// H is represented in the form +// H = 1 - tau * (1; v) * (1 v^T) +// where tau is a real scalar. 
+// +// On entry, x contains the vector x, on exit it contains v. +// +// Dlarfg is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlarfg(n int, alpha float64, x []float64, incX int) (beta, tau float64) { + switch { + case n < 0: + panic(nLT0) + case incX <= 0: + panic(badIncX) + } + + if n <= 1 { + return alpha, 0 + } + + if len(x) < 1+(n-2)*abs(incX) { + panic(shortX) + } + + bi := blas64.Implementation() + + xnorm := bi.Dnrm2(n-1, x, incX) + if xnorm == 0 { + return alpha, 0 + } + beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) + safmin := dlamchS / dlamchE + knt := 0 + if math.Abs(beta) < safmin { + // xnorm and beta may be inaccurate, scale x and recompute. + rsafmn := 1 / safmin + for { + knt++ + bi.Dscal(n-1, rsafmn, x, incX) + beta *= rsafmn + alpha *= rsafmn + if math.Abs(beta) >= safmin { + break + } + } + xnorm = bi.Dnrm2(n-1, x, incX) + beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) + } + tau = (beta - alpha) / beta + bi.Dscal(n-1, 1/(alpha-beta), x, incX) + for j := 0; j < knt; j++ { + beta *= safmin + } + return beta, tau +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go new file mode 100644 index 00000000..8f03eb8b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go @@ -0,0 +1,166 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlarft forms the triangular factor T of a block reflector H, storing the answer +// in t. +// H = I - V * T * V^T if store == lapack.ColumnWise +// H = I - V^T * T * V if store == lapack.RowWise +// H is defined by a product of the elementary reflectors where +// H = H_0 * H_1 * ... * H_{k-1} if direct == lapack.Forward +// H = H_{k-1} * ... * H_1 * H_0 if direct == lapack.Backward +// +// t is a k×k triangular matrix. t is upper triangular if direct = lapack.Forward +// and lower triangular otherwise. This function will panic if t is not of +// sufficient size. +// +// store describes the storage of the elementary reflectors in v. See +// Dlarfb for a description of layout. +// +// tau contains the scalar factors of the elementary reflectors H_i. +// +// Dlarft is an internal routine. It is exported for testing purposes. +func (Implementation) Dlarft(direct lapack.Direct, store lapack.StoreV, n, k int, v []float64, ldv int, tau []float64, t []float64, ldt int) { + mv, nv := n, k + if store == lapack.RowWise { + mv, nv = k, n + } + switch { + case direct != lapack.Forward && direct != lapack.Backward: + panic(badDirect) + case store != lapack.RowWise && store != lapack.ColumnWise: + panic(badStoreV) + case n < 0: + panic(nLT0) + case k < 1: + panic(kLT1) + case ldv < max(1, nv): + panic(badLdV) + case len(tau) < k: + panic(shortTau) + case ldt < max(1, k): + panic(shortT) + } + + if n == 0 { + return + } + + switch { + case len(v) < (mv-1)*ldv+nv: + panic(shortV) + case len(t) < (k-1)*ldt+k: + panic(shortT) + } + + bi := blas64.Implementation() + + // TODO(btracey): There are a number of minor obvious loop optimizations here. + // TODO(btracey): It may be possible to rearrange some of the code so that + // index of 1 is more common in the Dgemv. 
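+	// In the forward case T is built one column at a time: column i is formed
+	// from tau[i] and the inner products of reflector i with the previous
+	// reflectors, then multiplied by the leading i×i block of T already
+	// accumulated, and finally t[i,i] is set to tau[i].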
+ if direct == lapack.Forward { + prevlastv := n - 1 + for i := 0; i < k; i++ { + prevlastv = max(i, prevlastv) + if tau[i] == 0 { + for j := 0; j <= i; j++ { + t[j*ldt+i] = 0 + } + continue + } + var lastv int + if store == lapack.ColumnWise { + // skip trailing zeros + for lastv = n - 1; lastv >= i+1; lastv-- { + if v[lastv*ldv+i] != 0 { + break + } + } + for j := 0; j < i; j++ { + t[j*ldt+i] = -tau[i] * v[i*ldv+j] + } + j := min(lastv, prevlastv) + bi.Dgemv(blas.Trans, j-i, i, + -tau[i], v[(i+1)*ldv:], ldv, v[(i+1)*ldv+i:], ldv, + 1, t[i:], ldt) + } else { + for lastv = n - 1; lastv >= i+1; lastv-- { + if v[i*ldv+lastv] != 0 { + break + } + } + for j := 0; j < i; j++ { + t[j*ldt+i] = -tau[i] * v[j*ldv+i] + } + j := min(lastv, prevlastv) + bi.Dgemv(blas.NoTrans, i, j-i, + -tau[i], v[i+1:], ldv, v[i*ldv+i+1:], 1, + 1, t[i:], ldt) + } + bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, t, ldt, t[i:], ldt) + t[i*ldt+i] = tau[i] + if i > 1 { + prevlastv = max(prevlastv, lastv) + } else { + prevlastv = lastv + } + } + return + } + prevlastv := 0 + for i := k - 1; i >= 0; i-- { + if tau[i] == 0 { + for j := i; j < k; j++ { + t[j*ldt+i] = 0 + } + continue + } + var lastv int + if i < k-1 { + if store == lapack.ColumnWise { + for lastv = 0; lastv < i; lastv++ { + if v[lastv*ldv+i] != 0 { + break + } + } + for j := i + 1; j < k; j++ { + t[j*ldt+i] = -tau[i] * v[(n-k+i)*ldv+j] + } + j := max(lastv, prevlastv) + bi.Dgemv(blas.Trans, n-k+i-j, k-i-1, + -tau[i], v[j*ldv+i+1:], ldv, v[j*ldv+i:], ldv, + 1, t[(i+1)*ldt+i:], ldt) + } else { + for lastv = 0; lastv < i; lastv++ { + if v[i*ldv+lastv] != 0 { + break + } + } + for j := i + 1; j < k; j++ { + t[j*ldt+i] = -tau[i] * v[j*ldv+n-k+i] + } + j := max(lastv, prevlastv) + bi.Dgemv(blas.NoTrans, k-i-1, n-k+i-j, + -tau[i], v[(i+1)*ldv+j:], ldv, v[i*ldv+j:], 1, + 1, t[(i+1)*ldt+i:], ldt) + } + bi.Dtrmv(blas.Lower, blas.NoTrans, blas.NonUnit, k-i-1, + t[(i+1)*ldt+i+1:], ldt, + t[(i+1)*ldt+i:], ldt) + if i > 0 { + prevlastv = min(prevlastv, lastv) + } else { + prevlastv = lastv + } + } + t[i*ldt+i] = tau[i] + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go new file mode 100644 index 00000000..d7928c8c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go @@ -0,0 +1,550 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlarfx applies an elementary reflector H to a real m×n matrix C, from either +// the left or the right, with loop unrolling when the reflector has order less +// than 11. +// +// H is represented in the form +// H = I - tau * v * v^T, +// where tau is a real scalar and v is a real vector. If tau = 0, then H is +// taken to be the identity matrix. +// +// v must have length equal to m if side == blas.Left, and equal to n if side == +// blas.Right, otherwise Dlarfx will panic. +// +// c and ldc represent the m×n matrix C. On return, C is overwritten by the +// matrix H * C if side == blas.Left, or C * H if side == blas.Right. +// +// work must have length at least n if side == blas.Left, and at least m if side +// == blas.Right, otherwise Dlarfx will panic. work is not referenced if H has +// order < 11. +// +// Dlarfx is an internal routine. It is exported for testing purposes. 
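+//
+// As an illustrative sketch only (variable names are assumed, not prescribed),
+// applying H from the left to an m×n matrix C has the call shape
+//	impl.Dlarfx(blas.Left, m, n, v, tau, c, ldc, work)
+// where v has length m and work has length at least n; work may be empty when
+// m < 11 because the unrolled cases do not reference it.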
+func (impl Implementation) Dlarfx(side blas.Side, m, n int, v []float64, tau float64, c []float64, ldc int, work []float64) { + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + nh := m + lwork := n + if side == blas.Right { + nh = n + lwork = m + } + switch { + case len(v) < nh: + panic(shortV) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case nh > 10 && len(work) < lwork: + panic(shortWork) + } + + if tau == 0 { + return + } + + if side == blas.Left { + // Form H * C, where H has order m. + switch m { + default: // Code for general m. + impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) + return + + case 0: // No-op for zero size matrix. + return + + case 1: // Special code for 1×1 Householder matrix. + t0 := 1 - tau*v[0]*v[0] + for j := 0; j < n; j++ { + c[j] *= t0 + } + return + + case 2: // Special code for 2×2 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + } + return + + case 3: // Special code for 3×3 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + } + return + + case 4: // Special code for 4×4 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + } + return + + case 5: // Special code for 5×5 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + } + return + + case 6: // Special code for 6×6 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + } + return + + case 7: // Special code for 7×7 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + } + return + + case 8: // Special code for 8×8 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + } + return + + case 9: // Special code for 9×9 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + c[8*ldc+j] -= sum * t8 + } + return + + case 10: // Special code for 10×10 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + v9 := v[9] + t9 := tau * v9 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] + v9*c[9*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + c[8*ldc+j] -= sum * t8 + c[9*ldc+j] -= sum * t9 + } + return + } + } + + // Form C * H, where H has order n. + switch n { + default: // Code for general n. + impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) + return + + case 0: // No-op for zero size matrix. + return + + case 1: // Special code for 1×1 Householder matrix. + t0 := 1 - tau*v[0]*v[0] + for j := 0; j < m; j++ { + c[j*ldc] *= t0 + } + return + + case 2: // Special code for 2×2 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + } + return + + case 3: // Special code for 3×3 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + } + return + + case 4: // Special code for 4×4 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + } + return + + case 5: // Special code for 5×5 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + } + return + + case 6: // Special code for 6×6 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + v5*cs[5] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + } + return + + case 7: // Special code for 7×7 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + } + return + + case 8: // Special code for 8×8 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + } + return + + case 9: // Special code for 9×9 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + cs[8] -= sum * t8 + } + return + + case 10: // Special code for 10×10 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + v9 := v[9] + t9 := tau * v9 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] + v9*cs[9] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + cs[8] -= sum * t8 + cs[9] -= sum * t9 + } + return + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go new file mode 100644 index 00000000..ad645461 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go @@ -0,0 +1,80 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlartg generates a plane rotation so that +// [ cs sn] * [f] = [r] +// [-sn cs] [g] = [0] +// This is a more accurate version of BLAS drotg, with the other differences that +// if g = 0, then cs = 1 and sn = 0, and if f = 0 and g != 0, then cs = 0 and sn = 1. +// If abs(f) > abs(g), cs will be positive. +// +// Dlartg is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlartg(f, g float64) (cs, sn, r float64) { + safmn2 := math.Pow(dlamchB, math.Trunc(math.Log(dlamchS/dlamchE)/math.Log(dlamchB)/2)) + safmx2 := 1 / safmn2 + if g == 0 { + cs = 1 + sn = 0 + r = f + return cs, sn, r + } + if f == 0 { + cs = 0 + sn = 1 + r = g + return cs, sn, r + } + f1 := f + g1 := g + scale := math.Max(math.Abs(f1), math.Abs(g1)) + if scale >= safmx2 { + var count int + for { + count++ + f1 *= safmn2 + g1 *= safmn2 + scale = math.Max(math.Abs(f1), math.Abs(g1)) + if scale < safmx2 { + break + } + } + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + for i := 0; i < count; i++ { + r *= safmx2 + } + } else if scale <= safmn2 { + var count int + for { + count++ + f1 *= safmx2 + g1 *= safmx2 + scale = math.Max(math.Abs(f1), math.Abs(g1)) + if scale >= safmn2 { + break + } + } + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + for i := 0; i < count; i++ { + r *= safmn2 + } + } else { + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + } + if math.Abs(f) > math.Abs(g) && cs < 0 { + cs *= -1 + sn *= -1 + r *= -1 + } + return cs, sn, r +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go new file mode 100644 index 00000000..9922b4aa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go @@ -0,0 +1,43 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlas2 computes the singular values of the 2×2 matrix defined by +// [F G] +// [0 H] +// The smaller and larger singular values are returned in that order. +// +// Dlas2 is an internal routine. It is exported for testing purposes. 
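+//
+// For example, for the matrix
+//	[1 2]
+//	[0 3]
+// the call ssmin, ssmax := impl.Dlas2(1, 2, 3) returns approximately
+// 0.822 and 3.650 (note that ssmin*ssmax equals the absolute determinant, 3).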
+func (impl Implementation) Dlas2(f, g, h float64) (ssmin, ssmax float64) { + fa := math.Abs(f) + ga := math.Abs(g) + ha := math.Abs(h) + fhmin := math.Min(fa, ha) + fhmax := math.Max(fa, ha) + if fhmin == 0 { + if fhmax == 0 { + return 0, ga + } + v := math.Min(fhmax, ga) / math.Max(fhmax, ga) + return 0, math.Max(fhmax, ga) * math.Sqrt(1+v*v) + } + if ga < fhmax { + as := 1 + fhmin/fhmax + at := (fhmax - fhmin) / fhmax + au := (ga / fhmax) * (ga / fhmax) + c := 2 / (math.Sqrt(as*as+au) + math.Sqrt(at*at+au)) + return fhmin * c, fhmax / c + } + au := fhmax / ga + if au == 0 { + return fhmin * fhmax / ga, ga + } + as := 1 + fhmin/fhmax + at := (fhmax - fhmin) / fhmax + c := 1 / (math.Sqrt(1+(as*au)*(as*au)) + math.Sqrt(1+(at*au)*(at*au))) + return 2 * (fhmin * c) * au, ga / (c + c) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go new file mode 100644 index 00000000..61c4eb79 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go @@ -0,0 +1,111 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlascl multiplies an m×n matrix by the scalar cto/cfrom. +// +// cfrom must not be zero, and cto and cfrom must not be NaN, otherwise Dlascl +// will panic. +// +// Dlascl is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlascl(kind lapack.MatrixType, kl, ku int, cfrom, cto float64, m, n int, a []float64, lda int) { + switch kind { + default: + panic(badMatrixType) + case 'H', 'B', 'Q', 'Z': // See dlascl.f. + panic("not implemented") + case lapack.General, lapack.UpperTri, lapack.LowerTri: + if lda < max(1, n) { + panic(badLdA) + } + } + switch { + case cfrom == 0: + panic(zeroCFrom) + case math.IsNaN(cfrom): + panic(nanCFrom) + case math.IsNaN(cto): + panic(nanCTo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + } + + if n == 0 || m == 0 { + return + } + + switch kind { + case lapack.General, lapack.UpperTri, lapack.LowerTri: + if len(a) < (m-1)*lda+n { + panic(shortA) + } + } + + smlnum := dlamchS + bignum := 1 / smlnum + cfromc := cfrom + ctoc := cto + cfrom1 := cfromc * smlnum + for { + var done bool + var mul, ctol float64 + if cfrom1 == cfromc { + // cfromc is inf. + mul = ctoc / cfromc + done = true + ctol = ctoc + } else { + ctol = ctoc / bignum + if ctol == ctoc { + // ctoc is either 0 or inf. + mul = ctoc + done = true + cfromc = 1 + } else if math.Abs(cfrom1) > math.Abs(ctoc) && ctoc != 0 { + mul = smlnum + done = false + cfromc = cfrom1 + } else if math.Abs(ctol) > math.Abs(cfromc) { + mul = bignum + done = false + ctoc = ctol + } else { + mul = ctoc / cfromc + done = true + } + } + switch kind { + case lapack.General: + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + case lapack.UpperTri: + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + case lapack.LowerTri: + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + } + if done { + break + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go new file mode 100644 index 00000000..b4e63916 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go @@ -0,0 +1,57 @@ +// Copyright ©2015 The Gonum Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlaset sets the off-diagonal elements of A to alpha, and the diagonal +// elements to beta. If uplo == blas.Upper, only the elements in the upper +// triangular part are set. If uplo == blas.Lower, only the elements in the +// lower triangular part are set. If uplo is otherwise, all of the elements of A +// are set. +// +// Dlaset is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaset(uplo blas.Uplo, m, n int, alpha, beta float64, a []float64, lda int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + minmn := min(m, n) + if minmn == 0 { + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + a[i*lda+j] = alpha + } + } + } else if uplo == blas.Lower { + for i := 0; i < m; i++ { + for j := 0; j < min(i+1, n); j++ { + a[i*lda+j] = alpha + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = alpha + } + } + } + for i := 0; i < minmn; i++ { + a[i*lda+i] = beta + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go new file mode 100644 index 00000000..1f1d1dc4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go @@ -0,0 +1,100 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlasq1 computes the singular values of an n×n bidiagonal matrix with diagonal +// d and off-diagonal e. On exit, d contains the singular values in decreasing +// order, and e is overwritten. d must have length at least n, e must have +// length at least n-1, and the input work must have length at least 4*n. Dlasq1 +// will panic if these conditions are not met. +// +// Dlasq1 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq1(n int, d, e, work []float64) (info int) { + if n < 0 { + panic(nLT0) + } + + if n == 0 { + return info + } + + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case len(work) < 4*n: + panic(shortWork) + } + + if n == 1 { + d[0] = math.Abs(d[0]) + return info + } + + if n == 2 { + d[1], d[0] = impl.Dlas2(d[0], e[0], d[1]) + return info + } + + // Estimate the largest singular value. + var sigmx float64 + for i := 0; i < n-1; i++ { + d[i] = math.Abs(d[i]) + sigmx = math.Max(sigmx, math.Abs(e[i])) + } + d[n-1] = math.Abs(d[n-1]) + // Early return if sigmx is zero (matrix is already diagonal). + if sigmx == 0 { + impl.Dlasrt(lapack.SortDecreasing, n, d) + return info + } + + for i := 0; i < n; i++ { + sigmx = math.Max(sigmx, d[i]) + } + + // Copy D and E into WORK (in the Z format) and scale (squaring the + // input data makes scaling by a power of the radix pointless). + + eps := dlamchP + safmin := dlamchS + scale := math.Sqrt(eps / safmin) + bi := blas64.Implementation() + bi.Dcopy(n, d, 1, work, 2) + bi.Dcopy(n-1, e, 1, work[1:], 2) + impl.Dlascl(lapack.General, 0, 0, sigmx, scale, 2*n-1, 1, work, 1) + + // Compute the q's and e's. 
+ for i := 0; i < 2*n-1; i++ { + work[i] *= work[i] + } + work[2*n-1] = 0 + + info = impl.Dlasq2(n, work) + if info == 0 { + for i := 0; i < n; i++ { + d[i] = math.Sqrt(work[i]) + } + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) + } else if info == 2 { + // Maximum number of iterations exceeded. Move data from work + // into D and E so the calling subroutine can try to finish. + for i := 0; i < n; i++ { + d[i] = math.Sqrt(work[2*i]) + e[i] = math.Sqrt(work[2*i+1]) + } + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, e, 1) + } + return info +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go new file mode 100644 index 00000000..fd24a550 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go @@ -0,0 +1,369 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlasq2 computes all the eigenvalues of the symmetric positive +// definite tridiagonal matrix associated with the qd array Z. Eigevalues +// are computed to high relative accuracy avoiding denormalization, underflow +// and overflow. +// +// To see the relation of Z to the tridiagonal matrix, let L be a +// unit lower bidiagonal matrix with sub-diagonals Z(2,4,6,,..) and +// let U be an upper bidiagonal matrix with 1's above and diagonal +// Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the +// symmetric tridiagonal to which it is similar. +// +// info returns a status error. The return codes mean as follows: +// 0: The algorithm completed successfully. +// 1: A split was marked by a positive value in e. +// 2: Current block of Z not diagonalized after 100*n iterations (in inner +// while loop). On exit Z holds a qd array with the same eigenvalues as +// the given Z. +// 3: Termination criterion of outer while loop not met (program created more +// than N unreduced blocks). +// +// z must have length at least 4*n, and must not contain any negative elements. +// Dlasq2 will panic otherwise. +// +// Dlasq2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq2(n int, z []float64) (info int) { + if n < 0 { + panic(nLT0) + } + + if n == 0 { + return info + } + + if len(z) < 4*n { + panic(shortZ) + } + + if n == 1 { + if z[0] < 0 { + panic(negZ) + } + return info + } + + const cbias = 1.5 + + eps := dlamchP + safmin := dlamchS + tol := eps * 100 + tol2 := tol * tol + if n == 2 { + if z[1] < 0 || z[2] < 0 { + panic(negZ) + } else if z[2] > z[0] { + z[0], z[2] = z[2], z[0] + } + z[4] = z[0] + z[1] + z[2] + if z[1] > z[2]*tol2 { + t := 0.5 * (z[0] - z[2] + z[1]) + s := z[2] * (z[1] / t) + if s <= t { + s = z[2] * (z[1] / (t * (1 + math.Sqrt(1+s/t)))) + } else { + s = z[2] * (z[1] / (t + math.Sqrt(t)*math.Sqrt(t+s))) + } + t = z[0] + s + z[1] + z[2] *= z[0] / t + z[0] = t + } + z[1] = z[2] + z[5] = z[1] + z[0] + return info + } + // Check for negative data and compute sums of q's and e's. + z[2*n-1] = 0 + emin := z[1] + var d, e, qmax float64 + var i1, n1 int + for k := 0; k < 2*(n-1); k += 2 { + if z[k] < 0 || z[k+1] < 0 { + panic(negZ) + } + d += z[k] + e += z[k+1] + qmax = math.Max(qmax, z[k]) + emin = math.Min(emin, z[k+1]) + } + if z[2*(n-1)] < 0 { + panic(negZ) + } + d += z[2*(n-1)] + // Check for diagonality. 
+ if e == 0 { + for k := 1; k < n; k++ { + z[k] = z[2*k] + } + impl.Dlasrt(lapack.SortDecreasing, n, z) + z[2*(n-1)] = d + return info + } + trace := d + e + // Check for zero data. + if trace == 0 { + z[2*(n-1)] = 0 + return info + } + // Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). + for k := 2 * n; k >= 2; k -= 2 { + z[2*k-1] = 0 + z[2*k-2] = z[k-1] + z[2*k-3] = 0 + z[2*k-4] = z[k-2] + } + i0 := 0 + n0 := n - 1 + + // Reverse the qd-array, if warranted. + // z[4*i0-3] --> z[4*(i0+1)-3-1] --> z[4*i0] + if cbias*z[4*i0] < z[4*n0] { + ipn4Out := 4 * (i0 + n0 + 2) + for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { + i4 := i4loop - 1 + ipn4 := ipn4Out - 1 + z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] + z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] + } + } + + // Initial split checking via dqd and Li's test. + pp := 0 + for k := 0; k < 2; k++ { + d = z[4*n0+pp] + for i4loop := 4*n0 + pp; i4loop >= 4*(i0+1)+pp; i4loop -= 4 { + i4 := i4loop - 1 + if z[i4-1] <= tol2*d { + z[i4-1] = math.Copysign(0, -1) + d = z[i4-3] + } else { + d = z[i4-3] * (d / (d + z[i4-1])) + } + } + // dqd maps Z to ZZ plus Li's test. + emin = z[4*(i0+1)+pp] + d = z[4*i0+pp] + for i4loop := 4*(i0+1) + pp; i4loop <= 4*n0+pp; i4loop += 4 { + i4 := i4loop - 1 + z[i4-2*pp-2] = d + z[i4-1] + if z[i4-1] <= tol2*d { + z[i4-1] = math.Copysign(0, -1) + z[i4-2*pp-2] = d + z[i4-2*pp] = 0 + d = z[i4+1] + } else if safmin*z[i4+1] < z[i4-2*pp-2] && safmin*z[i4-2*pp-2] < z[i4+1] { + tmp := z[i4+1] / z[i4-2*pp-2] + z[i4-2*pp] = z[i4-1] * tmp + d *= tmp + } else { + z[i4-2*pp] = z[i4+1] * (z[i4-1] / z[i4-2*pp-2]) + d = z[i4+1] * (d / z[i4-2*pp-2]) + } + emin = math.Min(emin, z[i4-2*pp]) + } + z[4*(n0+1)-pp-3] = d + + // Now find qmax. + qmax = z[4*(i0+1)-pp-3] + for i4loop := 4*(i0+1) - pp + 2; i4loop <= 4*(n0+1)+pp-2; i4loop += 4 { + i4 := i4loop - 1 + qmax = math.Max(qmax, z[i4]) + } + // Prepare for the next iteration on K. + pp = 1 - pp + } + + // Initialise variables to pass to DLASQ3. + var ttype int + var dmin1, dmin2, dn, dn1, dn2, g, tau float64 + var tempq float64 + iter := 2 + var nFail int + nDiv := 2 * (n0 - i0) + var i4 int +outer: + for iwhila := 1; iwhila <= n+1; iwhila++ { + // Test for completion. + if n0 < 0 { + // Move q's to the front. + for k := 1; k < n; k++ { + z[k] = z[4*k] + } + // Sort and compute sum of eigenvalues. + impl.Dlasrt(lapack.SortDecreasing, n, z) + e = 0 + for k := n - 1; k >= 0; k-- { + e += z[k] + } + // Store trace, sum(eigenvalues) and information on performance. + z[2*n] = trace + z[2*n+1] = e + z[2*n+2] = float64(iter) + z[2*n+3] = float64(nDiv) / float64(n*n) + z[2*n+4] = 100 * float64(nFail) / float64(iter) + return info + } + + // While array unfinished do + // e[n0] holds the value of sigma when submatrix in i0:n0 + // splits from the rest of the array, but is negated. + var desig float64 + var sigma float64 + if n0 != n-1 { + sigma = -z[4*(n0+1)-2] + } + if sigma < 0 { + info = 1 + return info + } + // Find last unreduced submatrix's top index i0, find qmax and + // emin. Find Gershgorin-type bound if Q's much greater than E's. 
+ var emax float64 + if n0 > i0 { + emin = math.Abs(z[4*(n0+1)-6]) + } else { + emin = 0 + } + qmin := z[4*(n0+1)-4] + qmax = qmin + zSmall := false + for i4loop := 4 * (n0 + 1); i4loop >= 8; i4loop -= 4 { + i4 = i4loop - 1 + if z[i4-5] <= 0 { + zSmall = true + break + } + if qmin >= 4*emax { + qmin = math.Min(qmin, z[i4-3]) + emax = math.Max(emax, z[i4-5]) + } + qmax = math.Max(qmax, z[i4-7]+z[i4-5]) + emin = math.Min(emin, z[i4-5]) + } + if !zSmall { + i4 = 3 + } + i0 = (i4+1)/4 - 1 + pp = 0 + if n0-i0 > 1 { + dee := z[4*i0] + deemin := dee + kmin := i0 + for i4loop := 4*(i0+1) + 1; i4loop <= 4*(n0+1)-3; i4loop += 4 { + i4 := i4loop - 1 + dee = z[i4] * (dee / (dee + z[i4-2])) + if dee <= deemin { + deemin = dee + kmin = (i4+4)/4 - 1 + } + } + if (kmin-i0)*2 < n0-kmin && deemin <= 0.5*z[4*n0] { + ipn4Out := 4 * (i0 + n0 + 2) + pp = 2 + for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { + i4 := i4loop - 1 + ipn4 := ipn4Out - 1 + z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] + z[i4-2], z[ipn4-i4-3] = z[ipn4-i4-3], z[i4-2] + z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] + z[i4], z[ipn4-i4-5] = z[ipn4-i4-5], z[i4] + } + } + } + // Put -(initial shift) into DMIN. + dmin := -math.Max(0, qmin-2*math.Sqrt(qmin)*math.Sqrt(emax)) + + // Now i0:n0 is unreduced. + // PP = 0 for ping, PP = 1 for pong. + // PP = 2 indicates that flipping was applied to the Z array and + // and that the tests for deflation upon entry in Dlasq3 + // should not be performed. + nbig := 100 * (n0 - i0 + 1) + for iwhilb := 0; iwhilb < nbig; iwhilb++ { + if i0 > n0 { + continue outer + } + + // While submatrix unfinished take a good dqds step. + i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau = + impl.Dlasq3(i0, n0, z, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau) + + pp = 1 - pp + // When emin is very small check for splits. + if pp == 0 && n0-i0 >= 3 { + if z[4*(n0+1)-1] <= tol2*qmax || z[4*(n0+1)-2] <= tol2*sigma { + splt := i0 - 1 + qmax = z[4*i0] + emin = z[4*(i0+1)-2] + oldemn := z[4*(i0+1)-1] + for i4loop := 4 * (i0 + 1); i4loop <= 4*(n0-2); i4loop += 4 { + i4 := i4loop - 1 + if z[i4] <= tol2*z[i4-3] || z[i4-1] <= tol2*sigma { + z[i4-1] = -sigma + splt = i4 / 4 + qmax = 0 + emin = z[i4+3] + oldemn = z[i4+4] + } else { + qmax = math.Max(qmax, z[i4+1]) + emin = math.Min(emin, z[i4-1]) + oldemn = math.Min(oldemn, z[i4]) + } + } + z[4*(n0+1)-2] = emin + z[4*(n0+1)-1] = oldemn + i0 = splt + 1 + } + } + } + // Maximum number of iterations exceeded, restore the shift + // sigma and place the new d's and e's in a qd array. + // This might need to be done for several blocks. + info = 2 + i1 = i0 + for { + tempq = z[4*i0] + z[4*i0] += sigma + for k := i0 + 1; k <= n0; k++ { + tempe := z[4*(k+1)-6] + z[4*(k+1)-6] *= tempq / z[4*(k+1)-8] + tempq = z[4*k] + z[4*k] += sigma + tempe - z[4*(k+1)-6] + } + // Prepare to do this on the previous block if there is one. + if i1 <= 0 { + break + } + n1 = i1 - 1 + for i1 >= 1 && z[4*(i1+1)-6] >= 0 { + i1 -= 1 + } + sigma = -z[4*(n1+1)-2] + } + for k := 0; k < n; k++ { + z[2*k] = z[4*k] + // Only the block 1..N0 is unfinished. The rest of the e's + // must be essentially zero, although sometimes other data + // has been stored in them. 
+ if k < n0 { + z[2*(k+1)-1] = z[4*(k+1)-1] + } else { + z[2*(k+1)] = 0 + } + } + return info + } + info = 3 + return info +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go new file mode 100644 index 00000000..a05e94ef --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go @@ -0,0 +1,172 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq3 checks for deflation, computes a shift (tau) and calls dqds. +// In case of failure it changes shifts, and tries again until output +// is positive. +// +// Dlasq3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq3(i0, n0 int, z []float64, pp int, dmin, sigma, desig, qmax float64, nFail, iter, nDiv int, ttype int, dmin1, dmin2, dn, dn1, dn2, g, tau float64) ( + i0Out, n0Out, ppOut int, dminOut, sigmaOut, desigOut, qmaxOut float64, nFailOut, iterOut, nDivOut, ttypeOut int, dmin1Out, dmin2Out, dnOut, dn1Out, dn2Out, gOut, tauOut float64) { + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1 && pp != 2: + panic(badPp) + } + + const cbias = 1.5 + + n0in := n0 + eps := dlamchP + tol := eps * 100 + tol2 := tol * tol + var nn int + var t float64 + for { + if n0 < i0 { + return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau + } + if n0 == i0 { + z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma + n0-- + continue + } + nn = 4*(n0+1) + pp - 1 + if n0 != i0+1 { + // Check whether e[n0-1] is negligible, 1 eigenvalue. + if z[nn-5] > tol2*(sigma+z[nn-3]) && z[nn-2*pp-4] > tol2*z[nn-7] { + // Check whether e[n0-2] is negligible, 2 eigenvalues. + if z[nn-9] > tol2*sigma && z[nn-2*pp-8] > tol2*z[nn-11] { + break + } + } else { + z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma + n0-- + continue + } + } + if z[nn-3] > z[nn-7] { + z[nn-3], z[nn-7] = z[nn-7], z[nn-3] + } + t = 0.5 * (z[nn-7] - z[nn-3] + z[nn-5]) + if z[nn-5] > z[nn-3]*tol2 && t != 0 { + s := z[nn-3] * (z[nn-5] / t) + if s <= t { + s = z[nn-3] * (z[nn-5] / (t * (1 + math.Sqrt(1+s/t)))) + } else { + s = z[nn-3] * (z[nn-5] / (t + math.Sqrt(t)*math.Sqrt(t+s))) + } + t = z[nn-7] + (s + z[nn-5]) + z[nn-3] *= z[nn-7] / t + z[nn-7] = t + } + z[4*(n0+1)-8] = z[nn-7] + sigma + z[4*(n0+1)-4] = z[nn-3] + sigma + n0 -= 2 + } + if pp == 2 { + pp = 0 + } + + // Reverse the qd-array, if warranted. + if dmin <= 0 || n0 < n0in { + if cbias*z[4*(i0+1)+pp-4] < z[4*(n0+1)+pp-4] { + ipn4Out := 4 * (i0 + n0 + 2) + for j4loop := 4 * (i0 + 1); j4loop <= 2*((i0+1)+(n0+1)-1); j4loop += 4 { + ipn4 := ipn4Out - 1 + j4 := j4loop - 1 + + z[j4-3], z[ipn4-j4-4] = z[ipn4-j4-4], z[j4-3] + z[j4-2], z[ipn4-j4-3] = z[ipn4-j4-3], z[j4-2] + z[j4-1], z[ipn4-j4-6] = z[ipn4-j4-6], z[j4-1] + z[j4], z[ipn4-j4-5] = z[ipn4-j4-5], z[j4] + } + if n0-i0 <= 4 { + z[4*(n0+1)+pp-2] = z[4*(i0+1)+pp-2] + z[4*(n0+1)-pp-1] = z[4*(i0+1)-pp-1] + } + dmin2 = math.Min(dmin2, z[4*(i0+1)-pp-2]) + z[4*(n0+1)+pp-2] = math.Min(math.Min(z[4*(n0+1)+pp-2], z[4*(i0+1)+pp-2]), z[4*(i0+1)+pp+2]) + z[4*(n0+1)-pp-1] = math.Min(math.Min(z[4*(n0+1)-pp-1], z[4*(i0+1)-pp-1]), z[4*(i0+1)-pp+3]) + qmax = math.Max(math.Max(qmax, z[4*(i0+1)+pp-4]), z[4*(i0+1)+pp]) + dmin = math.Copysign(0, -1) // Fortran code has -zero, but -0 in go is 0 + } + } + + // Choose a shift. 
+ tau, ttype, g = impl.Dlasq4(i0, n0, z, pp, n0in, dmin, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g) + + // Call dqds until dmin > 0. +loop: + for { + i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq5(i0, n0, z, pp, tau, sigma) + + nDiv += n0 - i0 + 2 + iter++ + switch { + case dmin >= 0 && dmin1 >= 0: + // Success. + goto done + + case dmin < 0 && dmin1 > 0 && z[4*n0-pp-1] < tol*(sigma+dn1) && math.Abs(dn) < tol*sigma: + // Convergence hidden by negative dn. + z[4*n0-pp+1] = 0 + dmin = 0 + goto done + + case dmin < 0: + // Tau too big. Select new Tau and try again. + nFail++ + if ttype < -22 { + // Failed twice. Play it safe. + tau = 0 + } else if dmin1 > 0 { + // Late failure. Gives excellent shift. + tau = (tau + dmin) * (1 - 2*eps) + ttype -= 11 + } else { + // Early failure. Divide by 4. + tau = tau / 4 + ttype -= 12 + } + + case math.IsNaN(dmin): + if tau == 0 { + break loop + } + tau = 0 + + default: + // Possible underflow. Play it safe. + break loop + } + } + + // Risk of underflow. + dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq6(i0, n0, z, pp) + nDiv += n0 - i0 + 2 + iter++ + tau = 0 + +done: + if tau < sigma { + desig += tau + t = sigma + desig + desig -= t - sigma + } else { + t = sigma + tau + desig += sigma - (t - tau) + } + sigma = t + return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go new file mode 100644 index 00000000..f6dbb31b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go @@ -0,0 +1,249 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq4 computes an approximation to the smallest eigenvalue using values of d +// from the previous transform. +// i0, n0, and n0in are zero-indexed. +// +// Dlasq4 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq4(i0, n0 int, z []float64, pp int, n0in int, dmin, dmin1, dmin2, dn, dn1, dn2, tau float64, ttype int, g float64) (tauOut float64, ttypeOut int, gOut float64) { + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1: + panic(badPp) + } + + const ( + cnst1 = 0.563 + cnst2 = 1.01 + cnst3 = 1.05 + + cnstthird = 0.333 // TODO(btracey): Fix? + ) + // A negative dmin forces the shift to take that absolute value + // ttype records the type of shift. + if dmin <= 0 { + tau = -dmin + ttype = -1 + return tau, ttype, g + } + nn := 4*(n0+1) + pp - 1 // -1 for zero indexing + s := math.NaN() // Poison s so that failure to take a path below is obvious + if n0in == n0 { + // No eigenvalues deflated. 
+ if dmin == dn || dmin == dn1 { + b1 := math.Sqrt(z[nn-3]) * math.Sqrt(z[nn-5]) + b2 := math.Sqrt(z[nn-7]) * math.Sqrt(z[nn-9]) + a2 := z[nn-7] + z[nn-5] + if dmin == dn && dmin1 == dn1 { + gap2 := dmin2 - a2 - dmin2/4 + var gap1 float64 + if gap2 > 0 && gap2 > b2 { + gap1 = a2 - dn - (b2/gap2)*b2 + } else { + gap1 = a2 - dn - (b1 + b2) + } + if gap1 > 0 && gap1 > b1 { + s = math.Max(dn-(b1/gap1)*b1, 0.5*dmin) + ttype = -2 + } else { + s = 0 + if dn > b1 { + s = dn - b1 + } + if a2 > b1+b2 { + s = math.Min(s, a2-(b1+b2)) + } + s = math.Max(s, cnstthird*dmin) + ttype = -3 + } + } else { + ttype = -4 + s = dmin / 4 + var gam float64 + var np int + if dmin == dn { + gam = dn + a2 = 0 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b2 = z[nn-5] / z[nn-7] + np = nn - 9 + } else { + np = nn - 2*pp + gam = dn1 + if z[np-4] > z[np-2] { + return tau, ttype, g + } + a2 = z[np-4] / z[np-2] + if z[nn-9] > z[nn-11] { + return tau, ttype, g + } + b2 = z[nn-9] / z[nn-11] + np = nn - 13 + } + // Approximate contribution to norm squared from i < nn-1. + a2 += b2 + for i4loop := np + 1; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if b2 == 0 { + break + } + b1 = b2 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b2 *= z[i4] / z[i4-2] + a2 += b2 + if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { + break + } + } + a2 *= cnst3 + // Rayleigh quotient residual bound. + if a2 < cnst1 { + s = gam * (1 - math.Sqrt(a2)) / (1 + a2) + } + } + } else if dmin == dn2 { + ttype = -5 + s = dmin / 4 + // Compute contribution to norm squared from i > nn-2. + np := nn - 2*pp + b1 := z[np-2] + b2 := z[np-6] + gam := dn2 + if z[np-8] > b2 || z[np-4] > b1 { + return tau, ttype, g + } + a2 := (z[np-8] / b2) * (1 + z[np-4]/b1) + // Approximate contribution to norm squared from i < nn-2. + if n0-i0 > 2 { + b2 = z[nn-13] / z[nn-15] + a2 += b2 + for i4loop := (nn + 1) - 17; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if b2 == 0 { + break + } + b1 = b2 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b2 *= z[i4] / z[i4-2] + a2 += b2 + if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { + break + } + } + a2 *= cnst3 + } + if a2 < cnst1 { + s = gam * (1 - math.Sqrt(a2)) / (1 + a2) + } + } else { + // Case 6, no information to guide us. + if ttype == -6 { + g += cnstthird * (1 - g) + } else if ttype == -18 { + g = cnstthird / 4 + } else { + g = 1.0 / 4 + } + s = g * dmin + ttype = -6 + } + } else if n0in == (n0 + 1) { + // One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. + if dmin1 == dn1 && dmin2 == dn2 { + ttype = -7 + s = cnstthird * dmin1 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b1 := z[nn-5] / z[nn-7] + b2 := b1 + if b2 != 0 { + for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + a2 := b1 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b1 *= z[i4] / z[i4-2] + b2 += b1 + if 100*math.Max(b1, a2) < b2 { + break + } + } + } + b2 = math.Sqrt(cnst3 * b2) + a2 := dmin1 / (1 + b2*b2) + gap2 := 0.5*dmin2 - a2 + if gap2 > 0 && gap2 > b2*a2 { + s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) + } else { + s = math.Max(s, a2*(1-cnst2*b2)) + ttype = -8 + } + } else { + s = dmin1 / 4 + if dmin1 == dn1 { + s = 0.5 * dmin1 + } + ttype = -9 + } + } else if n0in == (n0 + 2) { + // Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. 
+ if dmin2 == dn2 && 2*z[nn-5] < z[nn-7] { + ttype = -10 + s = cnstthird * dmin2 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b1 := z[nn-5] / z[nn-7] + b2 := b1 + if b2 != 0 { + for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b1 *= z[i4] / z[i4-2] + b2 += b1 + if 100*b1 < b2 { + break + } + } + } + b2 = math.Sqrt(cnst3 * b2) + a2 := dmin2 / (1 + b2*b2) + gap2 := z[nn-7] + z[nn-9] - math.Sqrt(z[nn-11])*math.Sqrt(z[nn-9]) - a2 + if gap2 > 0 && gap2 > b2*a2 { + s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) + } else { + s = math.Max(s, a2*(1-cnst2*b2)) + } + } else { + s = dmin2 / 4 + ttype = -11 + } + } else if n0in > n0+2 { + // Case 12, more than two eigenvalues deflated. No information. + s = 0 + ttype = -12 + } + tau = s + return tau, ttype, g +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go new file mode 100644 index 00000000..d3826d91 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq5 computes one dqds transform in ping-pong form. +// i0 and n0 are zero-indexed. +// +// Dlasq5 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq5(i0, n0 int, z []float64, pp int, tau, sigma float64) (i0Out, n0Out, ppOut int, tauOut, sigmaOut, dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { + // The lapack function has inputs for ieee and eps, but Go requires ieee so + // these are unnecessary. + + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1: + panic(badPp) + } + + if n0-i0-1 <= 0 { + return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 + } + + eps := dlamchP + dthresh := eps * (sigma + tau) + if tau < dthresh*0.5 { + tau = 0 + } + var j4 int + var emin float64 + if tau != 0 { + j4 = 4*i0 + pp + emin = z[j4+4] + d := z[j4] - tau + dmin = d + // In the reference there are code paths that actually return this value. + // dmin1 = -z[j4] + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-2] = d + z[j4-1] + tmp := z[j4+1] / z[j4-2] + d = d*tmp - tau + dmin = math.Min(dmin, d) + z[j4] = z[j4-1] * tmp + emin = math.Min(z[j4], emin) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + tmp := z[j4+2] / z[j4-3] + d = d*tmp - tau + dmin = math.Min(dmin, d) + z[j4-1] = z[j4] * tmp + emin = math.Min(z[j4-1], emin) + } + } + // Unroll the last two steps. + dnm2 = d + dmin2 = dmin + j4 = 4*((n0+1)-2) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau + dmin = math.Min(dmin, dnm1) + + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau + dmin = math.Min(dmin, dn) + } else { + // This is the version that sets d's to zero if they are small enough. + j4 = 4*(i0+1) + pp - 4 + emin = z[j4+4] + d := z[j4] - tau + dmin = d + // In the reference there are code paths that actually return this value. 
+ // dmin1 = -z[j4] + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-2] = d + z[j4-1] + tmp := z[j4+1] / z[j4-2] + d = d*tmp - tau + if d < dthresh { + d = 0 + } + dmin = math.Min(dmin, d) + z[j4] = z[j4-1] * tmp + emin = math.Min(z[j4], emin) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + tmp := z[j4+2] / z[j4-3] + d = d*tmp - tau + if d < dthresh { + d = 0 + } + dmin = math.Min(dmin, d) + z[j4-1] = z[j4] * tmp + emin = math.Min(z[j4-1], emin) + } + } + // Unroll the last two steps. + dnm2 = d + dmin2 = dmin + j4 = 4*((n0+1)-2) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau + dmin = math.Min(dmin, dnm1) + + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau + dmin = math.Min(dmin, dn) + } + z[j4+2] = dn + z[4*(n0+1)-pp-1] = emin + return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go new file mode 100644 index 00000000..54bf5875 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go @@ -0,0 +1,118 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq6 computes one dqd transform in ping-pong form with protection against +// overflow and underflow. z has length at least 4*(n0+1) and holds the qd array. +// i0 is the zero-based first index. +// n0 is the zero-based last index. +// +// Dlasq6 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq6(i0, n0 int, z []float64, pp int) (dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1: + panic(badPp) + } + + if n0-i0-1 <= 0 { + return dmin, dmin1, dmin2, dn, dnm1, dnm2 + } + + safmin := dlamchS + j4 := 4*(i0+1) + pp - 4 // -4 rather than -3 for zero indexing + emin := z[j4+4] + d := z[j4] + dmin = d + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 // Translate back to zero-indexed. + z[j4-2] = d + z[j4-1] + if z[j4-2] == 0 { + z[j4] = 0 + d = z[j4+1] + dmin = d + emin = 0 + } else if safmin*z[j4+1] < z[j4-2] && safmin*z[j4-2] < z[j4+1] { + tmp := z[j4+1] / z[j4-2] + z[j4] = z[j4-1] * tmp + d *= tmp + } else { + z[j4] = z[j4+1] * (z[j4-1] / z[j4-2]) + d = z[j4+1] * (d / z[j4-2]) + } + dmin = math.Min(dmin, d) + emin = math.Min(emin, z[j4]) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + if z[j4-3] == 0 { + z[j4-1] = 0 + d = z[j4+2] + dmin = d + emin = 0 + } else if safmin*z[j4+2] < z[j4-3] && safmin*z[j4-3] < z[j4+2] { + tmp := z[j4+2] / z[j4-3] + z[j4-1] = z[j4] * tmp + d *= tmp + } else { + z[j4-1] = z[j4+2] * (z[j4] / z[j4-3]) + d = z[j4+2] * (d / z[j4-3]) + } + dmin = math.Min(dmin, d) + emin = math.Min(emin, z[j4-1]) + } + } + // Unroll last two steps. 
+ dnm2 = d + dmin2 = dmin + j4 = 4*(n0-1) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + if z[j4-2] == 0 { + z[j4] = 0 + dnm1 = z[j4p2+2] + dmin = dnm1 + emin = 0 + } else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] { + tmp := z[j4p2+2] / z[j4-2] + z[j4] = z[j4p2] * tmp + dnm1 = dnm2 * tmp + } else { + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2] * (dnm2 / z[j4-2]) + } + dmin = math.Min(dmin, dnm1) + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + if z[j4-2] == 0 { + z[j4] = 0 + dn = z[j4p2+2] + dmin = dn + emin = 0 + } else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] { + tmp := z[j4p2+2] / z[j4-2] + z[j4] = z[j4p2] * tmp + dn = dnm1 * tmp + } else { + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2] * (dnm1 / z[j4-2]) + } + dmin = math.Min(dmin, dn) + z[j4+2] = dn + z[4*(n0+1)-pp-1] = emin + return dmin, dmin1, dmin2, dn, dnm1, dnm2 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go new file mode 100644 index 00000000..a7dbe002 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go @@ -0,0 +1,279 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dlasr applies a sequence of plane rotations to the m×n matrix A. This series +// of plane rotations is implicitly represented by a matrix P. P is multiplied +// by a depending on the value of side -- A = P * A if side == lapack.Left, +// A = A * P^T if side == lapack.Right. +// +// The exact value of P depends on the value of pivot, but in all cases P is +// implicitly represented by a series of 2×2 rotation matrices. The entries of +// rotation matrix k are defined by s[k] and c[k] +// R(k) = [ c[k] s[k]] +// [-s[k] s[k]] +// If direct == lapack.Forward, the rotation matrices are applied as +// P = P(z-1) * ... * P(2) * P(1), while if direct == lapack.Backward they are +// applied as P = P(1) * P(2) * ... * P(n). +// +// pivot defines the mapping of the elements in R(k) to P(k). +// If pivot == lapack.Variable, the rotation is performed for the (k, k+1) plane. +// P(k) = [1 ] +// [ ... ] +// [ 1 ] +// [ c[k] s[k] ] +// [ -s[k] c[k] ] +// [ 1 ] +// [ ... ] +// [ 1] +// if pivot == lapack.Top, the rotation is performed for the (1, k+1) plane, +// P(k) = [c[k] s[k] ] +// [ 1 ] +// [ ... ] +// [ 1 ] +// [-s[k] c[k] ] +// [ 1 ] +// [ ... ] +// [ 1] +// and if pivot == lapack.Bottom, the rotation is performed for the (k, z) plane. +// P(k) = [1 ] +// [ ... ] +// [ 1 ] +// [ c[k] s[k]] +// [ 1 ] +// [ ... ] +// [ 1 ] +// [ -s[k] c[k]] +// s and c have length m - 1 if side == blas.Left, and n - 1 if side == blas.Right. +// +// Dlasr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasr(side blas.Side, pivot lapack.Pivot, direct lapack.Direct, m, n int, c, s, a []float64, lda int) { + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case pivot != lapack.Variable && pivot != lapack.Top && pivot != lapack.Bottom: + panic(badPivot) + case direct != lapack.Forward && direct != lapack.Backward: + panic(badDirect) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + if side == blas.Left { + if len(c) < m-1 { + panic(shortC) + } + if len(s) < m-1 { + panic(shortS) + } + } else { + if len(c) < n-1 { + panic(shortC) + } + if len(s) < n-1 { + panic(shortS) + } + } + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + if side == blas.Left { + if pivot == lapack.Variable { + if direct == lapack.Forward { + for j := 0; j < m-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp2 := a[j*lda+i] + tmp := a[(j+1)*lda+i] + a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2 + a[j*lda+i] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := m - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp2 := a[j*lda+i] + tmp := a[(j+1)*lda+i] + a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2 + a[j*lda+i] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } else if pivot == lapack.Top { + if direct == lapack.Forward { + for j := 1; j < m; j++ { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[i] + a[j*lda+i] = ctmp*tmp - stmp*tmp2 + a[i] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := m - 1; j >= 1; j-- { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[i] + a[j*lda+i] = ctmp*tmp - stmp*tmp2 + a[i] = stmp*tmp + ctmp*tmp2 + } + } + } + } + } + return + } + if direct == lapack.Forward { + for j := 0; j < m-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[(m-1)*lda+i] + a[j*lda+i] = stmp*tmp2 + ctmp*tmp + a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp + } + } + } + return + } + for j := m - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[(m-1)*lda+i] + a[j*lda+i] = stmp*tmp2 + ctmp*tmp + a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp + } + } + } + return + } + if pivot == lapack.Variable { + if direct == lapack.Forward { + for j := 0; j < n-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j+1] + tmp2 := a[i*lda+j] + a[i*lda+j+1] = ctmp*tmp - stmp*tmp2 + a[i*lda+j] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := n - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j+1] + tmp2 := a[i*lda+j] + a[i*lda+j+1] = ctmp*tmp - stmp*tmp2 + a[i*lda+j] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } else if pivot == lapack.Top { + if direct == lapack.Forward { + for j := 1; j < n; j++ { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda] + a[i*lda+j] = ctmp*tmp - stmp*tmp2 + a[i*lda] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := n - 1; j >= 1; j-- { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda] + a[i*lda+j] = ctmp*tmp - stmp*tmp2 + a[i*lda] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + if direct == lapack.Forward { + for j := 0; j < n-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda+n-1] + a[i*lda+j] = stmp*tmp2 + ctmp*tmp + a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp + } + + } + } + 
return + } + for j := n - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda+n-1] + a[i*lda+j] = stmp*tmp2 + ctmp*tmp + a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go new file mode 100644 index 00000000..be472805 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go @@ -0,0 +1,36 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "sort" + + "gonum.org/v1/gonum/lapack" +) + +// Dlasrt sorts the numbers in the input slice d. If s == lapack.SortIncreasing, +// the elements are sorted in increasing order. If s == lapack.SortDecreasing, +// the elements are sorted in decreasing order. For other values of s Dlasrt +// will panic. +// +// Dlasrt is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasrt(s lapack.Sort, n int, d []float64) { + switch { + case n < 0: + panic(nLT0) + case len(d) < n: + panic(shortD) + } + + d = d[:n] + switch s { + default: + panic(badSort) + case lapack.SortIncreasing: + sort.Float64s(d) + case lapack.SortDecreasing: + sort.Sort(sort.Reverse(sort.Float64Slice(d))) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go new file mode 100644 index 00000000..9c2dc772 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go @@ -0,0 +1,41 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlassq updates a sum of squares in scaled form. The input parameters scale and +// sumsq represent the current scale and total sum of squares. These values are +// updated with the information in the first n elements of the vector specified +// by x and incX. +// +// Dlassq is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlassq(n int, x []float64, incx int, scale float64, sumsq float64) (scl, smsq float64) { + switch { + case n < 0: + panic(nLT0) + case incx <= 0: + panic(badIncX) + case len(x) < 1+(n-1)*incx: + panic(shortX) + } + + if n == 0 { + return scale, sumsq + } + + for ix := 0; ix <= (n-1)*incx; ix += incx { + absxi := math.Abs(x[ix]) + if absxi > 0 || math.IsNaN(absxi) { + if scale < absxi { + sumsq = 1 + sumsq*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumsq += (absxi / scale) * (absxi / scale) + } + } + } + return scale, sumsq +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go new file mode 100644 index 00000000..204af193 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go @@ -0,0 +1,115 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasv2 computes the singular value decomposition of a 2×2 matrix. +// [ csl snl] [f g] [csr -snr] = [ssmax 0] +// [-snl csl] [0 h] [snr csr] = [ 0 ssmin] +// ssmax is the larger absolute singular value, and ssmin is the smaller absolute +// singular value. 
[cls, snl] and [csr, snr] are the left and right singular vectors. +// +// Dlasv2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasv2(f, g, h float64) (ssmin, ssmax, snr, csr, snl, csl float64) { + ft := f + fa := math.Abs(ft) + ht := h + ha := math.Abs(h) + // pmax points to the largest element of the matrix in terms of absolute value. + // 1 if F, 2 if G, 3 if H. + pmax := 1 + swap := ha > fa + if swap { + pmax = 3 + ft, ht = ht, ft + fa, ha = ha, fa + } + gt := g + ga := math.Abs(gt) + var clt, crt, slt, srt float64 + if ga == 0 { + ssmin = ha + ssmax = fa + clt = 1 + crt = 1 + slt = 0 + srt = 0 + } else { + gasmall := true + if ga > fa { + pmax = 2 + if (fa / ga) < dlamchE { + gasmall = false + ssmax = ga + if ha > 1 { + ssmin = fa / (ga / ha) + } else { + ssmin = (fa / ga) * ha + } + clt = 1 + slt = ht / gt + srt = 1 + crt = ft / gt + } + } + if gasmall { + d := fa - ha + l := d / fa + if d == fa { // deal with inf + l = 1 + } + m := gt / ft + t := 2 - l + s := math.Hypot(t, m) + var r float64 + if l == 0 { + r = math.Abs(m) + } else { + r = math.Hypot(l, m) + } + a := 0.5 * (s + r) + ssmin = ha / a + ssmax = fa * a + if m == 0 { + if l == 0 { + t = math.Copysign(2, ft) * math.Copysign(1, gt) + } else { + t = gt/math.Copysign(d, ft) + m/t + } + } else { + t = (m/(s+t) + m/(r+l)) * (1 + a) + } + l = math.Hypot(t, 2) + crt = 2 / l + srt = t / l + clt = (crt + srt*m) / a + slt = (ht / ft) * srt / a + } + } + if swap { + csl = srt + snl = crt + csr = slt + snr = clt + } else { + csl = clt + snl = slt + csr = crt + snr = srt + } + var tsign float64 + switch pmax { + case 1: + tsign = math.Copysign(1, csr) * math.Copysign(1, csl) * math.Copysign(1, f) + case 2: + tsign = math.Copysign(1, snr) * math.Copysign(1, csl) * math.Copysign(1, g) + case 3: + tsign = math.Copysign(1, snr) * math.Copysign(1, snl) * math.Copysign(1, h) + } + ssmax = math.Copysign(ssmax, tsign) + ssmin = math.Copysign(ssmin, tsign*math.Copysign(1, f)*math.Copysign(1, h)) + return ssmin, ssmax, snr, csr, snl, csl +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go new file mode 100644 index 00000000..b207d121 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go @@ -0,0 +1,52 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas/blas64" + +// Dlaswp swaps the rows k1 to k2 of a rectangular matrix A according to the +// indices in ipiv so that row k is swapped with ipiv[k]. +// +// n is the number of columns of A and incX is the increment for ipiv. If incX +// is 1, the swaps are applied from k1 to k2. If incX is -1, the swaps are +// applied in reverse order from k2 to k1. For other values of incX Dlaswp will +// panic. ipiv must have length k2+1, otherwise Dlaswp will panic. +// +// The indices k1, k2, and the elements of ipiv are zero-based. +// +// Dlaswp is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlaswp(n int, a []float64, lda int, k1, k2 int, ipiv []int, incX int) { + switch { + case n < 0: + panic(nLT0) + case k2 < 0: + panic(badK2) + case k1 < 0 || k2 < k1: + panic(badK1) + case lda < max(1, n): + panic(badLdA) + case len(a) < (k2-1)*lda+n: + panic(shortA) + case len(ipiv) != k2+1: + panic(badLenIpiv) + case incX != 1 && incX != -1: + panic(absIncNotOne) + } + + if n == 0 { + return + } + + bi := blas64.Implementation() + if incX == 1 { + for k := k1; k <= k2; k++ { + bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) + } + return + } + for k := k2; k >= k1; k-- { + bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go new file mode 100644 index 00000000..abfe60e5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go @@ -0,0 +1,290 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlasy2 solves the Sylvester matrix equation where the matrices are of order 1 +// or 2. It computes the unknown n1×n2 matrix X so that +// TL*X + sgn*X*TR = scale*B, if tranl == false and tranr == false, +// TL^T*X + sgn*X*TR = scale*B, if tranl == true and tranr == false, +// TL*X + sgn*X*TR^T = scale*B, if tranl == false and tranr == true, +// TL^T*X + sgn*X*TR^T = scale*B, if tranl == true and tranr == true, +// where TL is n1×n1, TR is n2×n2, B is n1×n2, and 1 <= n1,n2 <= 2. +// +// isgn must be 1 or -1, and n1 and n2 must be 0, 1, or 2, but these conditions +// are not checked. +// +// Dlasy2 returns three values, a scale factor that is chosen less than or equal +// to 1 to prevent the solution overflowing, the infinity norm of the solution, +// and an indicator of success. If ok is false, TL and TR have eigenvalues that +// are too close, so TL or TR is perturbed to get a non-singular equation. +// +// Dlasy2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasy2(tranl, tranr bool, isgn, n1, n2 int, tl []float64, ldtl int, tr []float64, ldtr int, b []float64, ldb int, x []float64, ldx int) (scale, xnorm float64, ok bool) { + // TODO(vladimir-ch): Add input validation checks conditionally skipped + // using the build tag mechanism. + + ok = true + // Quick return if possible. + if n1 == 0 || n2 == 0 { + return scale, xnorm, ok + } + + // Set constants to control overflow. + eps := dlamchP + smlnum := dlamchS / eps + sgn := float64(isgn) + + if n1 == 1 && n2 == 1 { + // 1×1 case: TL11*X + sgn*X*TR11 = B11. + tau1 := tl[0] + sgn*tr[0] + bet := math.Abs(tau1) + if bet <= smlnum { + tau1 = smlnum + bet = smlnum + ok = false + } + scale = 1 + gam := math.Abs(b[0]) + if smlnum*gam > bet { + scale = 1 / gam + } + x[0] = b[0] * scale / tau1 + xnorm = math.Abs(x[0]) + return scale, xnorm, ok + } + + if n1+n2 == 3 { + // 1×2 or 2×1 case. + var ( + smin float64 + tmp [4]float64 // tmp is used as a 2×2 row-major matrix. + btmp [2]float64 + ) + if n1 == 1 && n2 == 2 { + // 1×2 case: TL11*[X11 X12] + sgn*[X11 X12]*op[TR11 TR12] = [B11 B12]. 
+ // [TR21 TR22] + smin = math.Abs(tl[0]) + smin = math.Max(smin, math.Max(math.Abs(tr[0]), math.Abs(tr[1]))) + smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1]))) + smin = math.Max(eps*smin, smlnum) + tmp[0] = tl[0] + sgn*tr[0] + tmp[3] = tl[0] + sgn*tr[ldtr+1] + if tranr { + tmp[1] = sgn * tr[1] + tmp[2] = sgn * tr[ldtr] + } else { + tmp[1] = sgn * tr[ldtr] + tmp[2] = sgn * tr[1] + } + btmp[0] = b[0] + btmp[1] = b[1] + } else { + // 2×1 case: op[TL11 TL12]*[X11] + sgn*[X11]*TR11 = [B11]. + // [TL21 TL22]*[X21] [X21] [B21] + smin = math.Abs(tr[0]) + smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1]))) + smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1]))) + smin = math.Max(eps*smin, smlnum) + tmp[0] = tl[0] + sgn*tr[0] + tmp[3] = tl[ldtl+1] + sgn*tr[0] + if tranl { + tmp[1] = tl[ldtl] + tmp[2] = tl[1] + } else { + tmp[1] = tl[1] + tmp[2] = tl[ldtl] + } + btmp[0] = b[0] + btmp[1] = b[ldb] + } + + // Solve 2×2 system using complete pivoting. + // Set pivots less than smin to smin. + + bi := blas64.Implementation() + ipiv := bi.Idamax(len(tmp), tmp[:], 1) + // Compute the upper triangular matrix [u11 u12]. + // [ 0 u22] + u11 := tmp[ipiv] + if math.Abs(u11) <= smin { + ok = false + u11 = smin + } + locu12 := [4]int{1, 0, 3, 2} // Index in tmp of the element on the same row as the pivot. + u12 := tmp[locu12[ipiv]] + locl21 := [4]int{2, 3, 0, 1} // Index in tmp of the element on the same column as the pivot. + l21 := tmp[locl21[ipiv]] / u11 + locu22 := [4]int{3, 2, 1, 0} // Index in tmp of the remaining element. + u22 := tmp[locu22[ipiv]] - l21*u12 + if math.Abs(u22) <= smin { + ok = false + u22 = smin + } + if ipiv&0x2 != 0 { // true for ipiv equal to 2 and 3. + // The pivot was in the second row, swap the elements of + // the right-hand side. + btmp[0], btmp[1] = btmp[1], btmp[0]-l21*btmp[1] + } else { + btmp[1] -= l21 * btmp[0] + } + scale = 1 + if 2*smlnum*math.Abs(btmp[1]) > math.Abs(u22) || 2*smlnum*math.Abs(btmp[0]) > math.Abs(u11) { + scale = 0.5 / math.Max(math.Abs(btmp[0]), math.Abs(btmp[1])) + btmp[0] *= scale + btmp[1] *= scale + } + // Solve the system [u11 u12] [x21] = [ btmp[0] ]. + // [ 0 u22] [x22] [ btmp[1] ] + x22 := btmp[1] / u22 + x21 := btmp[0]/u11 - (u12/u11)*x22 + if ipiv&0x1 != 0 { // true for ipiv equal to 1 and 3. + // The pivot was in the second column, swap the elements + // of the solution. + x21, x22 = x22, x21 + } + x[0] = x21 + if n1 == 1 { + x[1] = x22 + xnorm = math.Abs(x[0]) + math.Abs(x[1]) + } else { + x[ldx] = x22 + xnorm = math.Max(math.Abs(x[0]), math.Abs(x[ldx])) + } + return scale, xnorm, ok + } + + // 2×2 case: op[TL11 TL12]*[X11 X12] + SGN*[X11 X12]*op[TR11 TR12] = [B11 B12]. + // [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22] + // + // Solve equivalent 4×4 system using complete pivoting. + // Set pivots less than smin to smin. 
+ + smin := math.Max(math.Abs(tr[0]), math.Abs(tr[1])) + smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1]))) + smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1]))) + smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1]))) + smin = math.Max(eps*smin, smlnum) + + var t [4][4]float64 + t[0][0] = tl[0] + sgn*tr[0] + t[1][1] = tl[0] + sgn*tr[ldtr+1] + t[2][2] = tl[ldtl+1] + sgn*tr[0] + t[3][3] = tl[ldtl+1] + sgn*tr[ldtr+1] + if tranl { + t[0][2] = tl[ldtl] + t[1][3] = tl[ldtl] + t[2][0] = tl[1] + t[3][1] = tl[1] + } else { + t[0][2] = tl[1] + t[1][3] = tl[1] + t[2][0] = tl[ldtl] + t[3][1] = tl[ldtl] + } + if tranr { + t[0][1] = sgn * tr[1] + t[1][0] = sgn * tr[ldtr] + t[2][3] = sgn * tr[1] + t[3][2] = sgn * tr[ldtr] + } else { + t[0][1] = sgn * tr[ldtr] + t[1][0] = sgn * tr[1] + t[2][3] = sgn * tr[ldtr] + t[3][2] = sgn * tr[1] + } + + var btmp [4]float64 + btmp[0] = b[0] + btmp[1] = b[1] + btmp[2] = b[ldb] + btmp[3] = b[ldb+1] + + // Perform elimination. + var jpiv [4]int // jpiv records any column swaps for pivoting. + for i := 0; i < 3; i++ { + var ( + xmax float64 + ipsv, jpsv int + ) + for ip := i; ip < 4; ip++ { + for jp := i; jp < 4; jp++ { + if math.Abs(t[ip][jp]) >= xmax { + xmax = math.Abs(t[ip][jp]) + ipsv = ip + jpsv = jp + } + } + } + if ipsv != i { + // The pivot is not in the top row of the unprocessed + // block, swap rows ipsv and i of t and btmp. + t[ipsv], t[i] = t[i], t[ipsv] + btmp[ipsv], btmp[i] = btmp[i], btmp[ipsv] + } + if jpsv != i { + // The pivot is not in the left column of the + // unprocessed block, swap columns jpsv and i of t. + for k := 0; k < 4; k++ { + t[k][jpsv], t[k][i] = t[k][i], t[k][jpsv] + } + } + jpiv[i] = jpsv + if math.Abs(t[i][i]) < smin { + ok = false + t[i][i] = smin + } + for k := i + 1; k < 4; k++ { + t[k][i] /= t[i][i] + btmp[k] -= t[k][i] * btmp[i] + for j := i + 1; j < 4; j++ { + t[k][j] -= t[k][i] * t[i][j] + } + } + } + if math.Abs(t[3][3]) < smin { + ok = false + t[3][3] = smin + } + scale = 1 + if 8*smlnum*math.Abs(btmp[0]) > math.Abs(t[0][0]) || + 8*smlnum*math.Abs(btmp[1]) > math.Abs(t[1][1]) || + 8*smlnum*math.Abs(btmp[2]) > math.Abs(t[2][2]) || + 8*smlnum*math.Abs(btmp[3]) > math.Abs(t[3][3]) { + + maxbtmp := math.Max(math.Abs(btmp[0]), math.Abs(btmp[1])) + maxbtmp = math.Max(maxbtmp, math.Max(math.Abs(btmp[2]), math.Abs(btmp[3]))) + scale = 1 / 8 / maxbtmp + btmp[0] *= scale + btmp[1] *= scale + btmp[2] *= scale + btmp[3] *= scale + } + // Compute the solution of the upper triangular system t * tmp = btmp. + var tmp [4]float64 + for i := 3; i >= 0; i-- { + temp := 1 / t[i][i] + tmp[i] = btmp[i] * temp + for j := i + 1; j < 4; j++ { + tmp[i] -= temp * t[i][j] * tmp[j] + } + } + for i := 2; i >= 0; i-- { + if jpiv[i] != i { + tmp[i], tmp[jpiv[i]] = tmp[jpiv[i]], tmp[i] + } + } + x[0] = tmp[0] + x[1] = tmp[1] + x[ldx] = tmp[2] + x[ldx+1] = tmp[3] + xnorm = math.Max(math.Abs(tmp[0])+math.Abs(tmp[1]), math.Abs(tmp[2])+math.Abs(tmp[3])) + return scale, xnorm, ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go new file mode 100644 index 00000000..018efc98 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go @@ -0,0 +1,165 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlatrd reduces nb rows and columns of a real n×n symmetric matrix A to symmetric +// tridiagonal form. It computes the orthonormal similarity transformation +// Q^T * A * Q +// and returns the matrices V and W to apply to the unreduced part of A. If +// uplo == blas.Upper, the upper triangle is supplied and the last nb rows are +// reduced. If uplo == blas.Lower, the lower triangle is supplied and the first +// nb rows are reduced. +// +// a contains the symmetric matrix on entry with active triangular half specified +// by uplo. On exit, the nb columns have been reduced to tridiagonal form. The +// diagonal contains the diagonal of the reduced matrix, the off-diagonal is +// set to 1, and the remaining elements contain the data to construct Q. +// +// If uplo == blas.Upper, with n = 5 and nb = 2 on exit a is +// [ a a a v4 v5] +// [ a a v4 v5] +// [ a 1 v5] +// [ d 1] +// [ d] +// +// If uplo == blas.Lower, with n = 5 and nb = 2, on exit a is +// [ d ] +// [ 1 d ] +// [v1 1 a ] +// [v1 v2 a a ] +// [v1 v2 a a a] +// +// e contains the superdiagonal elements of the reduced matrix. If uplo == blas.Upper, +// e[n-nb:n-1] contains the last nb columns of the reduced matrix, while if +// uplo == blas.Lower, e[:nb] contains the first nb columns of the reduced matrix. +// e must have length at least n-1, and Dlatrd will panic otherwise. +// +// tau contains the scalar factors of the elementary reflectors needed to construct Q. +// The reflectors are stored in tau[n-nb:n-1] if uplo == blas.Upper, and in +// tau[:nb] if uplo == blas.Lower. tau must have length n-1, and Dlatrd will panic +// otherwise. +// +// w is an n×nb matrix. On exit it contains the data to update the unreduced part +// of A. +// +// The matrix Q is represented as a product of elementary reflectors. Each reflector +// H has the form +// I - tau * v * v^T +// If uplo == blas.Upper, +// Q = H_{n-1} * H_{n-2} * ... * H_{n-nb} +// where v[:i-1] is stored in A[:i-1,i], v[i-1] = 1, and v[i:n] = 0. +// +// If uplo == blas.Lower, +// Q = H_0 * H_1 * ... * H_{nb-1} +// where v[:i+1] = 0, v[i+1] = 1, and v[i+2:n] is stored in A[i+2:n,i]. +// +// The vectors v form the n×nb matrix V which is used with W to apply a +// symmetric rank-2 update to the unreduced part of A +// A = A - V * W^T - W * V^T +// +// Dlatrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlatrd(uplo blas.Uplo, n, nb int, a []float64, lda int, e, tau, w []float64, ldw int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case lda < max(1, n): + panic(badLdA) + case ldw < max(1, nb): + panic(badLdW) + } + + if n == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(w) < (n-1)*ldw+nb: + panic(shortW) + case len(e) < n-1: + panic(shortE) + case len(tau) < n-1: + panic(shortTau) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + for i := n - 1; i >= n-nb; i-- { + iw := i - n + nb + if i < n-1 { + // Update A(0:i, i). + bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, a[i+1:], lda, + w[i*ldw+iw+1:], 1, 1, a[i:], lda) + bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, w[iw+1:], ldw, + a[i*lda+i+1:], 1, 1, a[i:], lda) + } + if i > 0 { + // Generate elementary reflector H_i to annihilate A(0:i-2,i). 
+ e[i-1], tau[i-1] = impl.Dlarfg(i, a[(i-1)*lda+i], a[i:], lda) + a[(i-1)*lda+i] = 1 + + // Compute W(0:i-1, i). + bi.Dsymv(blas.Upper, i, 1, a, lda, a[i:], lda, 0, w[iw:], ldw) + if i < n-1 { + bi.Dgemv(blas.Trans, i, n-i-1, 1, w[iw+1:], ldw, + a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw) + bi.Dgemv(blas.NoTrans, i, n-i-1, -1, a[i+1:], lda, + w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw) + bi.Dgemv(blas.Trans, i, n-i-1, 1, a[i+1:], lda, + a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw) + bi.Dgemv(blas.NoTrans, i, n-i-1, -1, w[iw+1:], ldw, + w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw) + } + bi.Dscal(i, tau[i-1], w[iw:], ldw) + alpha := -0.5 * tau[i-1] * bi.Ddot(i, w[iw:], ldw, a[i:], lda) + bi.Daxpy(i, alpha, a[i:], lda, w[iw:], ldw) + } + } + } else { + // Reduce first nb columns of lower triangle. + for i := 0; i < nb; i++ { + // Update A(i:n, i) + bi.Dgemv(blas.NoTrans, n-i, i, -1, a[i*lda:], lda, + w[i*ldw:], 1, 1, a[i*lda+i:], lda) + bi.Dgemv(blas.NoTrans, n-i, i, -1, w[i*ldw:], ldw, + a[i*lda:], 1, 1, a[i*lda+i:], lda) + if i < n-1 { + // Generate elementary reflector H_i to annihilate A(i+2:n,i). + e[i], tau[i] = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + a[(i+1)*lda+i] = 1 + + // Compute W(i+1:n,i). + bi.Dsymv(blas.Lower, n-i-1, 1, a[(i+1)*lda+i+1:], lda, + a[(i+1)*lda+i:], lda, 0, w[(i+1)*ldw+i:], ldw) + bi.Dgemv(blas.Trans, n-i-1, i, 1, w[(i+1)*ldw:], ldw, + a[(i+1)*lda+i:], lda, 0, w[i:], ldw) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, a[(i+1)*lda:], lda, + w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw) + bi.Dgemv(blas.Trans, n-i-1, i, 1, a[(i+1)*lda:], lda, + a[(i+1)*lda+i:], lda, 0, w[i:], ldw) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, w[(i+1)*ldw:], ldw, + w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw) + bi.Dscal(n-i-1, tau[i], w[(i+1)*ldw+i:], ldw) + alpha := -0.5 * tau[i] * bi.Ddot(n-i-1, w[(i+1)*ldw+i:], ldw, + a[(i+1)*lda+i:], lda) + bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda, + w[(i+1)*ldw+i:], ldw) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go new file mode 100644 index 00000000..dc445c6f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go @@ -0,0 +1,359 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlatrs solves a triangular system of equations scaled to prevent overflow. It +// solves +// A * x = scale * b if trans == blas.NoTrans +// A^T * x = scale * b if trans == blas.Trans +// where the scale s is set for numeric stability. +// +// A is an n×n triangular matrix. On entry, the slice x contains the values of +// b, and on exit it contains the solution vector x. +// +// If normin == true, cnorm is an input and cnorm[j] contains the norm of the off-diagonal +// part of the j^th column of A. If trans == blas.NoTrans, cnorm[j] must be greater +// than or equal to the infinity norm, and greater than or equal to the one-norm +// otherwise. If normin == false, then cnorm is treated as an output, and is set +// to contain the 1-norm of the off-diagonal part of the j^th column of A. +// +// Dlatrs is an internal routine. It is exported for testing purposes. 
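Editor's aside (not part of the vendored file or of this patch): a minimal sketch of how the scale factor returned by Dlatrs is normally interpreted, using the signature documented above. The 3×3 matrix, the right-hand side, and the main scaffolding are illustrative assumptions only.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation

	// Upper triangular 3×3 matrix A in row-major order (illustrative data).
	a := []float64{
		2, 1, 3,
		0, 4, 1,
		0, 0, 5,
	}
	// x holds b on entry and the (possibly scaled) solution on exit.
	x := []float64{1, 2, 3}
	cnorm := make([]float64, 3)

	// normin == false: Dlatrs fills cnorm with the off-diagonal column norms.
	scale := impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, false, 3, a, 3, x, cnorm)

	// Dlatrs solved A*x = scale*b, so x/scale recovers the solution of
	// A*x = b whenever scale is nonzero.
	fmt.Println("scale:", scale, "x:", x)
}

For well-scaled data such as this, scale comes back as 1 and x is the ordinary triangular solve.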
+func (impl Implementation) Dlatrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, normin bool, n int, a []float64, lda int, x []float64, cnorm []float64) (scale float64) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(x) < n: + panic(shortX) + case len(cnorm) < n: + panic(shortCNorm) + } + + upper := uplo == blas.Upper + nonUnit := diag == blas.NonUnit + + smlnum := dlamchS / dlamchP + bignum := 1 / smlnum + scale = 1 + + bi := blas64.Implementation() + + if !normin { + if upper { + cnorm[0] = 0 + for j := 1; j < n; j++ { + cnorm[j] = bi.Dasum(j, a[j:], lda) + } + } else { + for j := 0; j < n-1; j++ { + cnorm[j] = bi.Dasum(n-j-1, a[(j+1)*lda+j:], lda) + } + cnorm[n-1] = 0 + } + } + // Scale the column norms by tscal if the maximum element in cnorm is greater than bignum. + imax := bi.Idamax(n, cnorm, 1) + tmax := cnorm[imax] + var tscal float64 + if tmax <= bignum { + tscal = 1 + } else { + tscal = 1 / (smlnum * tmax) + bi.Dscal(n, tscal, cnorm, 1) + } + + // Compute a bound on the computed solution vector to see if bi.Dtrsv can be used. + j := bi.Idamax(n, x, 1) + xmax := math.Abs(x[j]) + xbnd := xmax + var grow float64 + var jfirst, jlast, jinc int + if trans == blas.NoTrans { + if upper { + jfirst = n - 1 + jlast = -1 + jinc = -1 + } else { + jfirst = 0 + jlast = n + jinc = 1 + } + // Compute the growth in A * x = b. + if tscal != 1 { + grow = 0 + goto Solve + } + if nonUnit { + grow = 1 / math.Max(xbnd, smlnum) + xbnd = grow + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + tjj := math.Abs(a[j*lda+j]) + xbnd = math.Min(xbnd, math.Min(1, tjj)*grow) + if tjj+cnorm[j] >= smlnum { + grow *= tjj / (tjj + cnorm[j]) + } else { + grow = 0 + } + } + grow = xbnd + } else { + grow = math.Min(1, 1/math.Max(xbnd, smlnum)) + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + grow *= 1 / (1 + cnorm[j]) + } + } + } else { + if upper { + jfirst = 0 + jlast = n + jinc = 1 + } else { + jfirst = n - 1 + jlast = -1 + jinc = -1 + } + if tscal != 1 { + grow = 0 + goto Solve + } + if nonUnit { + grow = 1 / (math.Max(xbnd, smlnum)) + xbnd = grow + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + xj := 1 + cnorm[j] + grow = math.Min(grow, xbnd/xj) + tjj := math.Abs(a[j*lda+j]) + if xj > tjj { + xbnd *= tjj / xj + } + } + grow = math.Min(grow, xbnd) + } else { + grow = math.Min(1, 1/math.Max(xbnd, smlnum)) + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + xj := 1 + cnorm[j] + grow /= xj + } + } + } + +Solve: + if grow*tscal > smlnum { + // Use the Level 2 BLAS solve if the reciprocal of the bound on + // elements of X is not too small. + bi.Dtrsv(uplo, trans, diag, n, a, lda, x, 1) + if tscal != 1 { + bi.Dscal(n, 1/tscal, cnorm, 1) + } + return scale + } + + // Use a Level 1 BLAS solve, scaling intermediate results. 
+ if xmax > bignum { + scale = bignum / xmax + bi.Dscal(n, scale, x, 1) + xmax = bignum + } + if trans == blas.NoTrans { + for j := jfirst; j != jlast; j += jinc { + xj := math.Abs(x[j]) + var tjj, tjjs float64 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + if tscal == 1 { + goto Skip1 + } + } + tjj = math.Abs(tjjs) + if tjj > smlnum { + if tjj < 1 { + if xj > tjj*bignum { + rec := 1 / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + x[j] /= tjjs + xj = math.Abs(x[j]) + } else if tjj > 0 { + if xj > tjj*bignum { + rec := (tjj * bignum) / xj + if cnorm[j] > 1 { + rec /= cnorm[j] + } + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + x[j] /= tjjs + xj = math.Abs(x[j]) + } else { + for i := 0; i < n; i++ { + x[i] = 0 + } + x[j] = 1 + xj = 1 + scale = 0 + xmax = 0 + } + Skip1: + if xj > 1 { + rec := 1 / xj + if cnorm[j] > (bignum-xmax)*rec { + rec *= 0.5 + bi.Dscal(n, rec, x, 1) + scale *= rec + } + } else if xj*cnorm[j] > bignum-xmax { + bi.Dscal(n, 0.5, x, 1) + scale *= 0.5 + } + if upper { + if j > 0 { + bi.Daxpy(j, -x[j]*tscal, a[j:], lda, x, 1) + i := bi.Idamax(j, x, 1) + xmax = math.Abs(x[i]) + } + } else { + if j < n-1 { + bi.Daxpy(n-j-1, -x[j]*tscal, a[(j+1)*lda+j:], lda, x[j+1:], 1) + i := j + bi.Idamax(n-j-1, x[j+1:], 1) + xmax = math.Abs(x[i]) + } + } + } + } else { + for j := jfirst; j != jlast; j += jinc { + xj := math.Abs(x[j]) + uscal := tscal + rec := 1 / math.Max(xmax, 1) + var tjjs float64 + if cnorm[j] > (bignum-xj)*rec { + rec *= 0.5 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + } + tjj := math.Abs(tjjs) + if tjj > 1 { + rec = math.Min(1, rec*tjj) + uscal /= tjjs + } + if rec < 1 { + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + var sumj float64 + if uscal == 1 { + if upper { + sumj = bi.Ddot(j, a[j:], lda, x, 1) + } else if j < n-1 { + sumj = bi.Ddot(n-j-1, a[(j+1)*lda+j:], lda, x[j+1:], 1) + } + } else { + if upper { + for i := 0; i < j; i++ { + sumj += (a[i*lda+j] * uscal) * x[i] + } + } else if j < n { + for i := j + 1; i < n; i++ { + sumj += (a[i*lda+j] * uscal) * x[i] + } + } + } + if uscal == tscal { + x[j] -= sumj + xj := math.Abs(x[j]) + var tjjs float64 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + if tscal == 1 { + goto Skip2 + } + } + tjj := math.Abs(tjjs) + if tjj > smlnum { + if tjj < 1 { + if xj > tjj*bignum { + rec = 1 / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + x[j] /= tjjs + } else if tjj > 0 { + if xj > tjj*bignum { + rec = (tjj * bignum) / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + x[j] /= tjjs + } else { + for i := 0; i < n; i++ { + x[i] = 0 + } + x[j] = 1 + scale = 0 + xmax = 0 + } + } else { + x[j] = x[j]/tjjs - sumj + } + Skip2: + xmax = math.Max(xmax, math.Abs(x[j])) + } + } + scale /= tscal + if tscal != 1 { + bi.Dscal(n, 1/tscal, cnorm, 1) + } + return scale +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go new file mode 100644 index 00000000..ecce22cc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go @@ -0,0 +1,64 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlauu2 computes the product +// U * U^T if uplo is blas.Upper +// L^T * L if uplo is blas.Lower +// where U or L is stored in the upper or lower triangular part of A. +// Only the upper or lower triangle of the result is stored, overwriting +// the corresponding factor in A. +func (impl Implementation) Dlauu2(uplo blas.Uplo, n int, a []float64, lda int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + // Compute the product U*U^T. + for i := 0; i < n; i++ { + aii := a[i*lda+i] + if i < n-1 { + a[i*lda+i] = bi.Ddot(n-i, a[i*lda+i:], 1, a[i*lda+i:], 1) + bi.Dgemv(blas.NoTrans, i, n-i-1, 1, a[i+1:], lda, a[i*lda+i+1:], 1, + aii, a[i:], lda) + } else { + bi.Dscal(i+1, aii, a[i:], lda) + } + } + } else { + // Compute the product L^T*L. + for i := 0; i < n; i++ { + aii := a[i*lda+i] + if i < n-1 { + a[i*lda+i] = bi.Ddot(n-i, a[i*lda+i:], lda, a[i*lda+i:], lda) + bi.Dgemv(blas.Trans, n-i-1, i, 1, a[(i+1)*lda:], lda, a[(i+1)*lda+i:], lda, + aii, a[i*lda:], 1) + } else { + bi.Dscal(i+1, aii, a[i*lda:], 1) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go new file mode 100644 index 00000000..67ecaddf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go @@ -0,0 +1,81 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlauum computes the product +// U * U^T if uplo is blas.Upper +// L^T * L if uplo is blas.Lower +// where U or L is stored in the upper or lower triangular part of A. +// Only the upper or lower triangle of the result is stored, overwriting +// the corresponding factor in A. +func (impl Implementation) Dlauum(uplo blas.Uplo, n int, a []float64, lda int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + // Determine the block size. + opts := "U" + if uplo == blas.Lower { + opts = "L" + } + nb := impl.Ilaenv(1, "DLAUUM", opts, n, -1, -1, -1) + + if nb <= 1 || n <= nb { + // Use unblocked code. + impl.Dlauu2(uplo, n, a, lda) + return + } + + // Use blocked code. + bi := blas64.Implementation() + if uplo == blas.Upper { + // Compute the product U*U^T. + for i := 0; i < n; i += nb { + ib := min(nb, n-i) + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.NonUnit, + i, ib, 1, a[i*lda+i:], lda, a[i:], lda) + impl.Dlauu2(blas.Upper, ib, a[i*lda+i:], lda) + if n-i-ib > 0 { + bi.Dgemm(blas.NoTrans, blas.Trans, i, ib, n-i-ib, + 1, a[i+ib:], lda, a[i*lda+i+ib:], lda, 1, a[i:], lda) + bi.Dsyrk(blas.Upper, blas.NoTrans, ib, n-i-ib, + 1, a[i*lda+i+ib:], lda, 1, a[i*lda+i:], lda) + } + } + } else { + // Compute the product L^T*L. 
+ for i := 0; i < n; i += nb { + ib := min(nb, n-i) + bi.Dtrmm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, + ib, i, 1, a[i*lda+i:], lda, a[i*lda:], lda) + impl.Dlauu2(blas.Lower, ib, a[i*lda+i:], lda) + if n-i-ib > 0 { + bi.Dgemm(blas.Trans, blas.NoTrans, ib, i, n-i-ib, + 1, a[(i+ib)*lda+i:], lda, a[(i+ib)*lda:], lda, 1, a[i*lda:], lda) + bi.Dsyrk(blas.Lower, blas.Trans, ib, n-i-ib, + 1, a[(i+ib)*lda+i:], lda, 1, a[i*lda+i:], lda) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go new file mode 100644 index 00000000..57942892 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go @@ -0,0 +1,28 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gonum is a pure-go implementation of the LAPACK API. The LAPACK API defines +// a set of algorithms for advanced matrix operations. +// +// The function definitions and implementations follow that of the netlib reference +// implementation. See http://www.netlib.org/lapack/explore-html/ for more +// information, and http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html +// for more license information. +// +// Slice function arguments frequently represent vectors and matrices. The data +// layout is identical to that found in https://godoc.org/gonum.org/v1/gonum/blas/gonum. +// +// Most LAPACK functions are built on top the routines defined in the BLAS API, +// and as such the computation time for many LAPACK functions is +// dominated by BLAS calls. Here, BLAS is accessed through the +// blas64 package (https://godoc.org/golang.org/v1/gonum/blas/blas64). In particular, +// this implies that an external BLAS library will be used if it is +// registered in blas64. +// +// The full LAPACK capability has not been implemented at present. The full +// API is very large, containing approximately 200 functions for double precision +// alone. Future additions will be focused on supporting the gonum matrix +// package (https://godoc.org/github.com/gonum/matrix/mat64), though pull requests +// with implementations and tests for LAPACK function are encouraged. +package gonum // import "gonum.org/v1/gonum/lapack/gonum" diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go new file mode 100644 index 00000000..a20765a9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go @@ -0,0 +1,76 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dorg2l generates an m×n matrix Q with orthonormal columns which is defined +// as the last n columns of a product of k elementary reflectors of order m. +// Q = H_{k-1} * ... * H_1 * H_0 +// See Dgelqf for more information. It must be that m >= n >= k. +// +// tau contains the scalar reflectors computed by Dgeqlf. tau must have length +// at least k, and Dorg2l will panic otherwise. +// +// work contains temporary memory, and must have length at least n. Dorg2l will +// panic otherwise. +// +// Dorg2l is an internal routine. It is exported for testing purposes. 
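Editor's aside (not part of the vendored file): a sketch of forming the explicit Q factor after a QL factorization, since Dorg2l consumes the reflectors in the layout described above. Dgeqlf is assumed to be available elsewhere in this package with the usual (m, n, a, lda, tau, work, lwork) signature; the matrix data and main scaffolding are illustrative only.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation

	const m, n = 4, 3 // Dorg2l requires m >= n >= k.
	a := []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 10,
		1, 0, 1,
	}
	tau := make([]float64, n)

	// QL factorization; the reflectors stored in a and tau are exactly
	// what Dorg2l expects (Dgeqlf is assumed, defined elsewhere in lapack/gonum).
	qlWork := make([]float64, 4*n)
	impl.Dgeqlf(m, n, a, n, tau, qlWork, len(qlWork))

	// Overwrite a with the m×n matrix Q whose columns are the last n
	// columns of the full orthogonal factor. work needs length >= n.
	work := make([]float64, n)
	impl.Dorg2l(m, n, n, a, n, tau, work)
	fmt.Println(a)
}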
+func (impl Implementation) Dorg2l(m, n, k int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < n: + panic(shortWork) + } + + // Initialize columns 0:n-k to columns of the unit matrix. + for j := 0; j < n-k; j++ { + for l := 0; l < m; l++ { + a[l*lda+j] = 0 + } + a[(m-n+j)*lda+j] = 1 + } + + bi := blas64.Implementation() + for i := 0; i < k; i++ { + ii := n - k + i + + // Apply H_i to A[0:m-k+i, 0:n-k+i] from the left. + a[(m-n+ii)*lda+ii] = 1 + impl.Dlarf(blas.Left, m-n+ii+1, ii, a[ii:], lda, tau[i], a, lda, work) + bi.Dscal(m-n+ii, -tau[i], a[ii:], lda) + a[(m-n+ii)*lda+ii] = 1 - tau[i] + + // Set A[m-k+i:m, n-k+i+1] to zero. + for l := m - n + ii + 1; l < m; l++ { + a[l*lda+ii] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go new file mode 100644 index 00000000..de447757 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go @@ -0,0 +1,75 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dorg2r generates an m×n matrix Q with orthonormal columns defined by the +// product of elementary reflectors as computed by Dgeqrf. +// Q = H_0 * H_1 * ... * H_{k-1} +// len(tau) >= k, 0 <= k <= n, 0 <= n <= m, len(work) >= n. +// Dorg2r will panic if these conditions are not met. +// +// Dorg2r is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorg2r(m, n, k int, a []float64, lda int, tau []float64, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < n: + panic(shortWork) + } + + bi := blas64.Implementation() + + // Initialize columns k+1:n to columns of the unit matrix. + for l := 0; l < m; l++ { + for j := k; j < n; j++ { + a[l*lda+j] = 0 + } + } + for j := k; j < n; j++ { + a[j*lda+j] = 1 + } + for i := k - 1; i >= 0; i-- { + for i := range work { + work[i] = 0 + } + if i < n-1 { + a[i*lda+i] = 1 + impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tau[i], a[i*lda+i+1:], lda, work) + } + if i < m-1 { + bi.Dscal(m-i-1, -tau[i], a[(i+1)*lda+i:], lda) + } + a[i*lda+i] = 1 - tau[i] + for l := 0; l < i; l++ { + a[l*lda+i] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go new file mode 100644 index 00000000..626cad5f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go @@ -0,0 +1,138 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/lapack" + +// Dorgbr generates one of the matrices Q or P^T computed by Dgebrd +// computed from the decomposition Dgebrd. 
See Dgebd2 for the description of +// Q and P^T. +// +// If vect == lapack.GenerateQ, then a is assumed to have been an m×k matrix and +// Q is of order m. If m >= k, then Dorgbr returns the first n columns of Q +// where m >= n >= k. If m < k, then Dorgbr returns Q as an m×m matrix. +// +// If vect == lapack.GeneratePT, then A is assumed to have been a k×n matrix, and +// P^T is of order n. If k < n, then Dorgbr returns the first m rows of P^T, +// where n >= m >= k. If k >= n, then Dorgbr returns P^T as an n×n matrix. +// +// Dorgbr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgbr(vect lapack.GenOrtho, m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + wantq := vect == lapack.GenerateQ + mn := min(m, n) + switch { + case vect != lapack.GenerateQ && vect != lapack.GeneratePT: + panic(badGenOrtho) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case wantq && n > m: + panic(nGTM) + case wantq && n < min(m, k): + panic("lapack: n < min(m,k)") + case !wantq && m > n: + panic(mGTN) + case !wantq && m < min(n, k): + panic("lapack: m < min(n,k)") + case lda < max(1, n) && lwork != -1: + // Normally, we follow the reference and require the leading + // dimension to be always valid, even in case of workspace + // queries. However, if a caller provided a placeholder value + // for lda (and a) when doing a workspace query that didn't + // fulfill the condition here, it would cause a panic. This is + // exactly what Dgesvd does. + panic(badLdA) + case lwork < max(1, mn) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + work[0] = 1 + if m == 0 || n == 0 { + return + } + + if wantq { + if m >= k { + impl.Dorgqr(m, n, k, a, lda, tau, work, -1) + } else if m > 1 { + impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, -1) + } + } else { + if k < n { + impl.Dorglq(m, n, k, a, lda, tau, work, -1) + } else if n > 1 { + impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, -1) + } + } + lworkopt := int(work[0]) + lworkopt = max(lworkopt, mn) + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case wantq && len(tau) < min(m, k): + panic(shortTau) + case !wantq && len(tau) < min(n, k): + panic(shortTau) + } + + if wantq { + // Form Q, determined by a call to Dgebrd to reduce an m×k matrix. + if m >= k { + impl.Dorgqr(m, n, k, a, lda, tau, work, lwork) + } else { + // Shift the vectors which define the elementary reflectors one + // column to the right, and set the first row and column of Q to + // those of the unit matrix. + for j := m - 1; j >= 1; j-- { + a[j] = 0 + for i := j + 1; i < m; i++ { + a[i*lda+j] = a[i*lda+j-1] + } + } + a[0] = 1 + for i := 1; i < m; i++ { + a[i*lda] = 0 + } + if m > 1 { + // Form Q[1:m-1, 1:m-1] + impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, lwork) + } + } + } else { + // Form P^T, determined by a call to Dgebrd to reduce a k×n matrix. + if k < n { + impl.Dorglq(m, n, k, a, lda, tau, work, lwork) + } else { + // Shift the vectors which define the elementary reflectors one + // row downward, and set the first row and column of P^T to + // those of the unit matrix. 
+ a[0] = 1 + for i := 1; i < n; i++ { + a[i*lda] = 0 + } + for j := 1; j < n; j++ { + for i := j - 1; i >= 1; i-- { + a[i*lda+j] = a[(i-1)*lda+j] + } + a[j] = 0 + } + if n > 1 { + impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) + } + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go new file mode 100644 index 00000000..6e799d10 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go @@ -0,0 +1,101 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Dorghr generates an n×n orthogonal matrix Q which is defined as the product +// of ihi-ilo elementary reflectors: +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// +// a and lda represent an n×n matrix that contains the elementary reflectors, as +// returned by Dgehrd. On return, a is overwritten by the n×n orthogonal matrix +// Q. Q will be equal to the identity matrix except in the submatrix +// Q[ilo+1:ihi+1,ilo+1:ihi+1]. +// +// ilo and ihi must have the same values as in the previous call of Dgehrd. It +// must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo = 0, ihi = -1, if n == 0. +// +// tau contains the scalar factors of the elementary reflectors, as returned by +// Dgehrd. tau must have length n-1. +// +// work must have length at least max(1,lwork) and lwork must be at least +// ihi-ilo. For optimum performance lwork must be at least (ihi-ilo)*nb where nb +// is the optimal blocksize. On return, work[0] will contain the optimal value +// of lwork. +// +// If lwork == -1, instead of performing Dorghr, only the optimal value of lwork +// will be stored into work[0]. +// +// If any requirement on input sizes is not met, Dorghr will panic. +// +// Dorghr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorghr(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { + nh := ihi - ilo + switch { + case ilo < 0 || max(1, n) <= ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, nh) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + lwkopt := max(1, nh) * impl.Ilaenv(1, "DORGQR", " ", nh, nh, nh, -1) + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(tau) < n-1: + panic(shortTau) + } + + // Shift the vectors which define the elementary reflectors one column + // to the right. + for i := ilo + 2; i < ihi+1; i++ { + copy(a[i*lda+ilo+1:i*lda+i], a[i*lda+ilo:i*lda+i-1]) + } + // Set the first ilo+1 and the last n-ihi-1 rows and columns to those of + // the identity matrix. + for i := 0; i < ilo+1; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + a[i*lda+i] = 1 + } + for i := ilo + 1; i < ihi+1; i++ { + for j := 0; j <= ilo; j++ { + a[i*lda+j] = 0 + } + for j := i; j < n; j++ { + a[i*lda+j] = 0 + } + } + for i := ihi + 1; i < n; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + a[i*lda+i] = 1 + } + if nh > 0 { + // Generate Q[ilo+1:ihi+1,ilo+1:ihi+1]. 
+ impl.Dorgqr(nh, nh, nh, a[(ilo+1)*lda+ilo+1:], lda, tau[ilo:ihi], work, lwork) + } + work[0] = float64(lwkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go new file mode 100644 index 00000000..b5566b9d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go @@ -0,0 +1,71 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dorgl2 generates an m×n matrix Q with orthonormal rows defined by the +// first m rows product of elementary reflectors as computed by Dgelqf. +// Q = H_0 * H_1 * ... * H_{k-1} +// len(tau) >= k, 0 <= k <= m, 0 <= m <= n, len(work) >= m. +// Dorgl2 will panic if these conditions are not met. +// +// Dorgl2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgl2(m, n, k int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < m: + panic(nLTM) + case k < 0: + panic(kLT0) + case k > m: + panic(kGTM) + case lda < max(1, m): + panic(badLdA) + } + + if m == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < m: + panic(shortWork) + } + + bi := blas64.Implementation() + + if k < m { + for i := k; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + } + for j := k; j < m; j++ { + a[j*lda+j] = 1 + } + } + for i := k - 1; i >= 0; i-- { + if i < n-1 { + if i < m-1 { + a[i*lda+i] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tau[i], a[(i+1)*lda+i:], lda, work) + } + bi.Dscal(n-i-1, -tau[i], a[i*lda+i+1:], 1) + } + a[i*lda+i] = 1 - tau[i] + for l := 0; l < i; l++ { + a[i*lda+l] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go new file mode 100644 index 00000000..a6dd980c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go @@ -0,0 +1,123 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorglq generates an m×n matrix Q with orthonormal columns defined by the +// product of elementary reflectors as computed by Dgelqf. +// Q = H_0 * H_1 * ... * H_{k-1} +// Dorglq is the blocked version of Dorgl2 that makes greater use of level-3 BLAS +// routines. +// +// len(tau) >= k, 0 <= k <= m, and 0 <= m <= n. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= m, and the amount of blocking is limited by the usable length. +// If lwork == -1, instead of computing Dorglq the optimal work length is stored +// into work[0]. +// +// Dorglq will panic if the conditions on input values are not met. +// +// Dorglq is an internal routine. It is exported for testing purposes. 
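Editor's aside (not part of the vendored file): a sketch of the lwork == -1 workspace-query pattern that the Dorglq documentation above describes, applied after an LQ factorization. Dgelqf is assumed to be available elsewhere in this package with the usual (m, n, a, lda, tau, work, lwork) signature; the data and main scaffolding are illustrative only.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation

	const m, n = 2, 4 // m <= n for an LQ factorization.
	a := []float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
	}
	k := m // Number of elementary reflectors.
	tau := make([]float64, k)

	// LQ factorization (Dgelqf assumed); a generous fixed workspace keeps
	// the sketch short.
	lqWork := make([]float64, 4*m)
	impl.Dgelqf(m, n, a, n, tau, lqWork, len(lqWork))

	// Workspace query: with lwork == -1, Dorglq only writes the optimal
	// length into work[0].
	query := make([]float64, 1)
	impl.Dorglq(m, n, k, a, n, tau, query, -1)
	work := make([]float64, int(query[0]))

	// Overwrite the first m rows of a with the m×n matrix Q.
	impl.Dorglq(m, n, k, a, n, tau, work, len(work))
	fmt.Println(a)
}

The same query-then-call pattern applies to the other blocked Dorg* and Dorm* routines in this patch.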
+func (impl Implementation) Dorglq(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < m: + panic(nLTM) + case k < 0: + panic(kLT0) + case k > m: + panic(kGTM) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, m) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + if m == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DORGLQ", " ", m, n, k, -1) + if lwork == -1 { + work[0] = float64(m * nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + nbmin := 2 // Minimum block size + var nx int // Crossover size from blocked to unbloked code + iws := m // Length of work needed + var ldwork int + if 1 < nb && nb < k { + nx = max(0, impl.Ilaenv(3, "DORGLQ", " ", m, n, k, -1)) + if nx < k { + ldwork = nb + iws = m * ldwork + if lwork < iws { + nb = lwork / m + ldwork = nb + nbmin = max(2, impl.Ilaenv(2, "DORGLQ", " ", m, n, k, -1)) + } + } + } + + var ki, kk int + if nbmin <= nb && nb < k && nx < k { + // The first kk rows are handled by the blocked method. + ki = ((k - nx - 1) / nb) * nb + kk = min(k, ki+nb) + for i := kk; i < m; i++ { + for j := 0; j < kk; j++ { + a[i*lda+j] = 0 + } + } + } + if kk < m { + // Perform the operation on colums kk to the end. + impl.Dorgl2(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) + } + if kk > 0 { + // Perform the operation on column-blocks + for i := ki; i >= 0; i -= nb { + ib := min(nb, k-i) + if i+ib < m { + impl.Dlarft(lapack.Forward, lapack.RowWise, + n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + + impl.Dlarfb(blas.Right, blas.Trans, lapack.Forward, lapack.RowWise, + m-i-ib, n-i, ib, + a[i*lda+i:], lda, + work, ldwork, + a[(i+ib)*lda+i:], lda, + work[ib*ldwork:], ldwork) + } + impl.Dorgl2(ib, n-i, ib, a[i*lda+i:], lda, tau[i:], work) + for l := i; l < i+ib; l++ { + for j := 0; j < i; j++ { + a[l*lda+j] = 0 + } + } + } + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go new file mode 100644 index 00000000..6927ba4c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go @@ -0,0 +1,136 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorgql generates the m×n matrix Q with orthonormal columns defined as the +// last n columns of a product of k elementary reflectors of order m +// Q = H_{k-1} * ... * H_1 * H_0. +// +// It must hold that +// 0 <= k <= n <= m, +// and Dorgql will panic otherwise. +// +// On entry, the (n-k+i)-th column of A must contain the vector which defines +// the elementary reflector H_i, for i=0,...,k-1, and tau[i] must contain its +// scalar factor. On return, a contains the m×n matrix Q. +// +// tau must have length at least k, and Dorgql will panic otherwise. +// +// work must have length at least max(1,lwork), and lwork must be at least +// max(1,n), otherwise Dorgql will panic. For optimum performance lwork must +// be a sufficiently large multiple of n. +// +// If lwork == -1, instead of computing Dorgql the optimal work length is stored +// into work[0]. +// +// Dorgql is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dorgql(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DORGQL", " ", m, n, k, -1) + if lwork == -1 { + work[0] = float64(n * nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + nbmin := 2 + var nx, ldwork int + iws := n + if 1 < nb && nb < k { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(3, "DORGQL", " ", m, n, k, -1)) + if nx < k { + // Determine if workspace is large enough for blocked code. + iws = n * nb + if lwork < iws { + // Not enough workspace to use optimal nb: reduce nb and determine + // the minimum value of nb. + nb = lwork / n + nbmin = max(2, impl.Ilaenv(2, "DORGQL", " ", m, n, k, -1)) + } + ldwork = nb + } + } + + var kk int + if nbmin <= nb && nb < k && nx < k { + // Use blocked code after the first block. The last kk columns are handled + // by the block method. + kk = min(k, ((k-nx+nb-1)/nb)*nb) + + // Set A(m-kk:m, 0:n-kk) to zero. + for i := m - kk; i < m; i++ { + for j := 0; j < n-kk; j++ { + a[i*lda+j] = 0 + } + } + } + + // Use unblocked code for the first or only block. + impl.Dorg2l(m-kk, n-kk, k-kk, a, lda, tau, work) + if kk > 0 { + // Use blocked code. + for i := k - kk; i < k; i += nb { + ib := min(nb, k-i) + if n-k+i > 0 { + // Form the triangular factor of the block reflector + // H = H_{i+ib-1} * ... * H_{i+1} * H_i. + impl.Dlarft(lapack.Backward, lapack.ColumnWise, m-k+i+ib, ib, + a[n-k+i:], lda, tau[i:], work, ldwork) + + // Apply H to A[0:m-k+i+ib, 0:n-k+i] from the left. + impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Backward, lapack.ColumnWise, + m-k+i+ib, n-k+i, ib, a[n-k+i:], lda, work, ldwork, + a, lda, work[ib*ldwork:], ldwork) + } + + // Apply H to rows 0:m-k+i+ib of current block. + impl.Dorg2l(m-k+i+ib, ib, ib, a[n-k+i:], lda, tau[i:], work) + + // Set rows m-k+i+ib:m of current block to zero. + for j := n - k + i; j < n-k+i+ib; j++ { + for l := m - k + i + ib; l < m; l++ { + a[l*lda+j] = 0 + } + } + } + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go new file mode 100644 index 00000000..f07fdaf4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorgqr generates an m×n matrix Q with orthonormal columns defined by the +// product of elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1} +// as computed by Dgeqrf. +// Dorgqr is the blocked version of Dorg2r that makes greater use of level-3 BLAS +// routines. +// +// The length of tau must be at least k, and the length of work must be at least n. +// It also must be that 0 <= k <= n and 0 <= n <= m. +// +// work is temporary storage, and lwork specifies the usable memory length. 
At +// minimum, lwork >= n, and the amount of blocking is limited by the usable +// length. If lwork == -1, instead of computing Dorgqr the optimal work length +// is stored into work[0]. +// +// Dorgqr will panic if the conditions on input values are not met. +// +// Dorgqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgqr(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n) && lwork != -1: + // Normally, we follow the reference and require the leading + // dimension to be always valid, even in case of workspace + // queries. However, if a caller provided a placeholder value + // for lda (and a) when doing a workspace query that didn't + // fulfill the condition here, it would cause a panic. This is + // exactly what Dgesvd does. + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DORGQR", " ", m, n, k, -1) + // work is treated as an n×nb matrix + if lwork == -1 { + work[0] = float64(n * nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + nbmin := 2 // Minimum block size + var nx int // Crossover size from blocked to unbloked code + iws := n // Length of work needed + var ldwork int + if 1 < nb && nb < k { + nx = max(0, impl.Ilaenv(3, "DORGQR", " ", m, n, k, -1)) + if nx < k { + ldwork = nb + iws = n * ldwork + if lwork < iws { + nb = lwork / n + ldwork = nb + nbmin = max(2, impl.Ilaenv(2, "DORGQR", " ", m, n, k, -1)) + } + } + } + var ki, kk int + if nbmin <= nb && nb < k && nx < k { + // The first kk columns are handled by the blocked method. + ki = ((k - nx - 1) / nb) * nb + kk = min(k, ki+nb) + for i := 0; i < kk; i++ { + for j := kk; j < n; j++ { + a[i*lda+j] = 0 + } + } + } + if kk < n { + // Perform the operation on colums kk to the end. + impl.Dorg2r(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) + } + if kk > 0 { + // Perform the operation on column-blocks. + for i := ki; i >= 0; i -= nb { + ib := min(nb, k-i) + if i+ib < n { + impl.Dlarft(lapack.Forward, lapack.ColumnWise, + m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + + impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Forward, lapack.ColumnWise, + m-i, n-i-ib, ib, + a[i*lda+i:], lda, + work, ldwork, + a[i*lda+i+ib:], lda, + work[ib*ldwork:], ldwork) + } + impl.Dorg2r(m-i, ib, ib, a[i*lda+i:], lda, tau[i:], work) + // Set rows 0:i-1 of current block to zero. + for j := i; j < i+ib; j++ { + for l := 0; l < i; l++ { + a[l*lda+j] = 0 + } + } + } + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go new file mode 100644 index 00000000..483fbcae --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go @@ -0,0 +1,104 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorgtr generates a real orthogonal matrix Q which is defined as the product +// of n-1 elementary reflectors of order n as returned by Dsytrd. +// +// The construction of Q depends on the value of uplo: +// Q = H_{n-1} * ... 
* H_1 * H_0 if uplo == blas.Upper +// Q = H_0 * H_1 * ... * H_{n-1} if uplo == blas.Lower +// where H_i is constructed from the elementary reflectors as computed by Dsytrd. +// See the documentation for Dsytrd for more information. +// +// tau must have length at least n-1, and Dorgtr will panic otherwise. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= max(1,n-1), and Dorgtr will panic otherwise. The amount of blocking +// is limited by the usable length. +// If lwork == -1, instead of computing Dorgtr the optimal work length is stored +// into work[0]. +// +// Dorgtr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgtr(uplo blas.Uplo, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n-1) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + if n == 0 { + work[0] = 1 + return + } + + var nb int + if uplo == blas.Upper { + nb = impl.Ilaenv(1, "DORGQL", " ", n-1, n-1, n-1, -1) + } else { + nb = impl.Ilaenv(1, "DORGQR", " ", n-1, n-1, n-1, -1) + } + lworkopt := max(1, n-1) * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(tau) < n-1: + panic(shortTau) + } + + if uplo == blas.Upper { + // Q was determined by a call to Dsytrd with uplo == blas.Upper. + // Shift the vectors which define the elementary reflectors one column + // to the left, and set the last row and column of Q to those of the unit + // matrix. + for j := 0; j < n-1; j++ { + for i := 0; i < j; i++ { + a[i*lda+j] = a[i*lda+j+1] + } + a[(n-1)*lda+j] = 0 + } + for i := 0; i < n-1; i++ { + a[i*lda+n-1] = 0 + } + a[(n-1)*lda+n-1] = 1 + + // Generate Q[0:n-1, 0:n-1]. + impl.Dorgql(n-1, n-1, n-1, a, lda, tau, work, lwork) + } else { + // Q was determined by a call to Dsytrd with uplo == blas.Upper. + // Shift the vectors which define the elementary reflectors one column + // to the right, and set the first row and column of Q to those of the unit + // matrix. + for j := n - 1; j > 0; j-- { + a[j] = 0 + for i := j + 1; i < n; i++ { + a[i*lda+j] = a[i*lda+j-1] + } + } + a[0] = 1 + for i := 1; i < n; i++ { + a[i*lda] = 0 + } + if n > 1 { + // Generate Q[1:n, 1:n]. + impl.Dorgqr(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go new file mode 100644 index 00000000..4b0bd83c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go @@ -0,0 +1,101 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorm2r multiplies a general matrix C by an orthogonal matrix from a QR factorization +// determined by Dgeqrf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of size m×k, and if side == blas.Right +// a is of size n×k. 
+// +// tau contains the Householder factors and is of length at least k and this function +// will panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right and this function will panic otherwise. +// +// Dorm2r is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorm2r(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + left := side == blas.Left + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case lda < max(1, k): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + return + } + + switch { + case left && len(a) < (m-1)*lda+k: + panic(shortA) + case !left && len(a) < (n-1)*lda+k: + panic(shortA) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(tau) < k: + panic(shortTau) + case left && len(work) < n: + panic(shortWork) + case !left && len(work) < m: + panic(shortWork) + } + + if left { + if trans == blas.NoTrans { + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + return + } + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + return + } + if trans == blas.NoTrans { + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + return + } + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go new file mode 100644 index 00000000..026dc041 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go @@ -0,0 +1,178 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormbr applies a multiplicative update to the matrix C based on a +// decomposition computed by Dgebrd. +// +// Dormbr overwrites the m×n matrix C with +// Q * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.NoTrans +// C * Q if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.NoTrans +// Q^T * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.Trans +// C * Q^T if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.Trans +// +// P * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.NoTrans +// C * P if vect == lapack.ApplyP, side == blas.Right, and trans == blas.NoTrans +// P^T * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.Trans +// C * P^T if vect == lapack.ApplyP, side == blas.Right, and trans == blas.Trans +// where P and Q are the orthogonal matrices determined by Dgebrd when reducing +// a matrix A to bidiagonal form: A = Q * B * P^T. See Dgebrd for the +// definitions of Q and P. 
+// +// If vect == lapack.ApplyQ, A is assumed to have been an nq×k matrix, while if +// vect == lapack.ApplyP, A is assumed to have been a k×nq matrix. nq = m if +// side == blas.Left, while nq = n if side == blas.Right. +// +// tau must have length min(nq,k), and Dormbr will panic otherwise. tau contains +// the elementary reflectors to construct Q or P depending on the value of +// vect. +// +// work must have length at least max(1,lwork), and lwork must be either -1 or +// at least max(1,n) if side == blas.Left, and at least max(1,m) if side == +// blas.Right. For optimum performance lwork should be at least n*nb if side == +// blas.Left, and at least m*nb if side == blas.Right, where nb is the optimal +// block size. On return, work[0] will contain the optimal value of lwork. +// +// If lwork == -1, the function only calculates the optimal value of lwork and +// returns it in work[0]. +// +// Dormbr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dormbr(vect lapack.ApplyOrtho, side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + nq := n + nw := m + if side == blas.Left { + nq = m + nw = n + } + applyQ := vect == lapack.ApplyQ + switch { + case !applyQ && vect != lapack.ApplyP: + panic(badApplyOrtho) + case side != blas.Left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case applyQ && lda < max(1, min(nq, k)): + panic(badLdA) + case !applyQ && lda < max(1, nq): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if m == 0 || n == 0 { + work[0] = 1 + return + } + + // The current implementation does not use opts, but a future change may + // use these options so construct them. + var opts string + if side == blas.Left { + opts = "L" + } else { + opts = "R" + } + if trans == blas.Trans { + opts += "T" + } else { + opts += "N" + } + var nb int + if applyQ { + if side == blas.Left { + nb = impl.Ilaenv(1, "DORMQR", opts, m-1, n, m-1, -1) + } else { + nb = impl.Ilaenv(1, "DORMQR", opts, m, n-1, n-1, -1) + } + } else { + if side == blas.Left { + nb = impl.Ilaenv(1, "DORMLQ", opts, m-1, n, m-1, -1) + } else { + nb = impl.Ilaenv(1, "DORMLQ", opts, m, n-1, n-1, -1) + } + } + lworkopt := max(1, nw) * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + minnqk := min(nq, k) + switch { + case applyQ && len(a) < (nq-1)*lda+minnqk: + panic(shortA) + case !applyQ && len(a) < (minnqk-1)*lda+nq: + panic(shortA) + case len(tau) < minnqk: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + } + + if applyQ { + // Change the operation to get Q depending on the size of the initial + // matrix to Dgebrd. The size matters due to the storage location of + // the off-diagonal elements. 
+ if nq >= k { + impl.Dormqr(side, trans, m, n, k, a, lda, tau[:k], c, ldc, work, lwork) + } else if nq > 1 { + mi := m + ni := n - 1 + i1 := 0 + i2 := 1 + if side == blas.Left { + mi = m - 1 + ni = n + i1 = 1 + i2 = 0 + } + impl.Dormqr(side, trans, mi, ni, nq-1, a[1*lda:], lda, tau[:nq-1], c[i1*ldc+i2:], ldc, work, lwork) + } + work[0] = float64(lworkopt) + return + } + + transt := blas.Trans + if trans == blas.Trans { + transt = blas.NoTrans + } + + // Change the operation to get P depending on the size of the initial + // matrix to Dgebrd. The size matters due to the storage location of + // the off-diagonal elements. + if nq > k { + impl.Dormlq(side, transt, m, n, k, a, lda, tau, c, ldc, work, lwork) + } else if nq > 1 { + mi := m + ni := n - 1 + i1 := 0 + i2 := 1 + if side == blas.Left { + mi = m - 1 + ni = n + i1 = 1 + i2 = 0 + } + impl.Dormlq(side, transt, mi, ni, nq-1, a[1:], lda, tau, c[i1*ldc+i2:], ldc, work, lwork) + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go new file mode 100644 index 00000000..c00f4405 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go @@ -0,0 +1,129 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dormhr multiplies an m×n general matrix C with an nq×nq orthogonal matrix Q +// Q * C, if side == blas.Left and trans == blas.NoTrans, +// Q^T * C, if side == blas.Left and trans == blas.Trans, +// C * Q, if side == blas.Right and trans == blas.NoTrans, +// C * Q^T, if side == blas.Right and trans == blas.Trans, +// where nq == m if side == blas.Left and nq == n if side == blas.Right. +// +// Q is defined implicitly as the product of ihi-ilo elementary reflectors, as +// returned by Dgehrd: +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Q is equal to the identity matrix except in the submatrix +// Q[ilo+1:ihi+1,ilo+1:ihi+1]. +// +// ilo and ihi must have the same values as in the previous call of Dgehrd. It +// must hold that +// 0 <= ilo <= ihi < m, if m > 0 and side == blas.Left, +// ilo = 0 and ihi = -1, if m = 0 and side == blas.Left, +// 0 <= ilo <= ihi < n, if n > 0 and side == blas.Right, +// ilo = 0 and ihi = -1, if n = 0 and side == blas.Right. +// +// a and lda represent an m×m matrix if side == blas.Left and an n×n matrix if +// side == blas.Right. The matrix contains vectors which define the elementary +// reflectors, as returned by Dgehrd. +// +// tau contains the scalar factors of the elementary reflectors, as returned by +// Dgehrd. tau must have length m-1 if side == blas.Left and n-1 if side == +// blas.Right. +// +// c and ldc represent the m×n matrix C. On return, c is overwritten by the +// product with Q. +// +// work must have length at least max(1,lwork), and lwork must be at least +// max(1,n), if side == blas.Left, and max(1,m), if side == blas.Right. For +// optimum performance lwork should be at least n*nb if side == blas.Left and +// m*nb if side == blas.Right, where nb is the optimal block size. On return, +// work[0] will contain the optimal value of lwork. +// +// If lwork == -1, instead of performing Dormhr, only the optimal value of lwork +// will be stored in work[0]. +// +// If any requirement on input sizes is not met, Dormhr will panic. +// +// Dormhr is an internal routine. It is exported for testing purposes. 
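Editor's aside (not part of the vendored file): a sketch of the typical Dormhr call sequence, applying the implicit Q from a Hessenberg reduction to a tall matrix C, as the documentation above describes. Dgehrd is assumed to be available elsewhere in this package with the usual (n, ilo, ihi, a, lda, tau, work, lwork) signature; the matrices and main scaffolding are illustrative only.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation

	const n = 4
	a := []float64{
		4, 1, 2, 3,
		1, 3, 1, 2,
		2, 1, 5, 1,
		3, 2, 1, 4,
	}
	tau := make([]float64, n-1)

	// Reduce A to upper Hessenberg form H = Q^T A Q (Dgehrd assumed,
	// defined elsewhere in lapack/gonum), with ilo = 0 and ihi = n-1.
	hWork := make([]float64, 4*n)
	impl.Dgehrd(n, 0, n-1, a, n, tau, hWork, len(hWork))

	// Apply Q from the left to an n×2 matrix C, i.e. form Q * C,
	// using the same ilo and ihi as in the Dgehrd call.
	c := []float64{
		1, 0,
		0, 1,
		0, 0,
		0, 0,
	}
	query := make([]float64, 1)
	impl.Dormhr(blas.Left, blas.NoTrans, n, 2, 0, n-1, a, n, tau, c, 2, query, -1)
	work := make([]float64, int(query[0]))
	impl.Dormhr(blas.Left, blas.NoTrans, n, 2, 0, n-1, a, n, tau, c, 2, work, len(work))
	fmt.Println(c)
}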
+func (impl Implementation) Dormhr(side blas.Side, trans blas.Transpose, m, n, ilo, ihi int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + nq := n // The order of Q. + nw := m // The minimum length of work. + if side == blas.Left { + nq = m + nw = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case ilo < 0 || max(1, nq) <= ilo: + panic(badIlo) + case ihi < min(ilo, nq-1) || nq <= ihi: + panic(badIhi) + case lda < max(1, nq): + panic(badLdA) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if m == 0 || n == 0 { + work[0] = 1 + return + } + + nh := ihi - ilo + var nb int + if side == blas.Left { + opts := "LN" + if trans == blas.Trans { + opts = "LT" + } + nb = impl.Ilaenv(1, "DORMQR", opts, nh, n, nh, -1) + } else { + opts := "RN" + if trans == blas.Trans { + opts = "RT" + } + nb = impl.Ilaenv(1, "DORMQR", opts, m, nh, nh, -1) + } + lwkopt := max(1, nw) * nb + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + if nh == 0 { + work[0] = 1 + return + } + + switch { + case len(a) < (nq-1)*lda+nq: + panic(shortA) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(tau) != nq-1: + panic(badLenTau) + } + + if side == blas.Left { + impl.Dormqr(side, trans, nh, n, nh, a[(ilo+1)*lda+ilo:], lda, + tau[ilo:ihi], c[(ilo+1)*ldc:], ldc, work, lwork) + } else { + impl.Dormqr(side, trans, m, nh, nh, a[(ilo+1)*lda+ilo:], lda, + tau[ilo:ihi], c[ilo+1:], ldc, work, lwork) + } + work[0] = float64(lwkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go new file mode 100644 index 00000000..25aa83ac --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go @@ -0,0 +1,102 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorml2 multiplies a general matrix C by an orthogonal matrix from an LQ factorization +// determined by Dgelqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of side k×m, and if side == blas.Right +// a is of size k×n. +// +// tau contains the Householder factors and is of length at least k and this function will +// panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right and this function will panic otherwise. +// +// Dorml2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dorml2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + left := side == blas.Left + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case left && lda < max(1, m): + panic(badLdA) + case !left && lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + return + } + + switch { + case left && len(a) < (k-1)*lda+m: + panic(shortA) + case !left && len(a) < (k-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case left && len(work) < n: + panic(shortWork) + case !left && len(work) < m: + panic(shortWork) + } + + notrans := trans == blas.NoTrans + switch { + case left && notrans: + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + + case left && !notrans: + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + + case !left && notrans: + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + + case !left && !notrans: + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go new file mode 100644 index 00000000..6fcfc2fb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go @@ -0,0 +1,174 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormlq multiplies the matrix C by the orthogonal matrix Q defined by the +// slices a and tau. A and tau are as returned from Dgelqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, A is a matrix of side k×m, and if side == blas.Right +// A is of size k×n. This uses a blocked algorithm. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, +// and this function will panic otherwise. +// Dormlq uses a block algorithm, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dormlq, +// the optimal work length will be stored into work[0]. +// +// tau contains the Householder scales and must have length at least k, and +// this function will panic otherwise. 
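+//
+// A usage sketch (illustrative only), assuming n > 0, a and tau as returned by
+// Dgelqf with a of size k×m for side == blas.Left, and c an m×n matrix:
+//  work := make([]float64, n) // The minimum lwork for blas.Left is max(1, n).
+//  impl.Dormlq(blas.Left, blas.NoTrans, m, n, k, a, lda, tau, c, ldc, work, n)
+// overwrites c with Q * C; supplying a longer work slice lets the blocked code
+// path be used.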
+func (impl Implementation) Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + left := side == blas.Left + nw := m + if left { + nw = n + } + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case left && lda < max(1, m): + panic(badLdA) + case !left && lda < max(1, n): + panic(badLdA) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + tsize = nbmax * ldt + ) + opts := string(side) + string(trans) + nb := min(nbmax, impl.Ilaenv(1, "DORMLQ", opts, m, n, k, -1)) + lworkopt := max(1, nw)*nb + tsize + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case left && len(a) < (k-1)*lda+m: + panic(shortA) + case !left && len(a) < (k-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + } + + nbmin := 2 + if 1 < nb && nb < k { + iws := nw*nb + tsize + if lwork < iws { + nb = (lwork - tsize) / nw + nbmin = max(2, impl.Ilaenv(2, "DORMLQ", opts, m, n, k, -1)) + } + } + if nb < nbmin || k <= nb { + // Call unblocked code. + impl.Dorml2(side, trans, m, n, k, a, lda, tau, c, ldc, work) + work[0] = float64(lworkopt) + return + } + + t := work[:tsize] + wrk := work[tsize:] + ldwrk := nb + + notrans := trans == blas.NoTrans + transt := blas.NoTrans + if notrans { + transt = blas.Trans + } + + switch { + case left && notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, + a[i*lda+i:], lda, + t, ldt, + c[i*ldc:], ldc, + wrk, ldwrk) + } + + case left && !notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, + a[i*lda+i:], lda, + t, ldt, + c[i*ldc:], ldc, + wrk, ldwrk) + } + + case !left && notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, + a[i*lda+i:], lda, + t, ldt, + c[i:], ldc, + wrk, ldwrk) + } + + case !left && !notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, + a[i*lda+i:], lda, + t, ldt, + c[i:], ldc, + wrk, ldwrk) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go new file mode 100644 index 00000000..8ae45086 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go @@ -0,0 +1,177 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormqr multiplies an m×n matrix C by an orthogonal matrix Q as +// C = Q * C, if side == blas.Left and trans == blas.NoTrans, +// C = Q^T * C, if side == blas.Left and trans == blas.Trans, +// C = C * Q, if side == blas.Right and trans == blas.NoTrans, +// C = C * Q^T, if side == blas.Right and trans == blas.Trans, +// where Q is defined as the product of k elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1}. +// +// If side == blas.Left, A is an m×k matrix and 0 <= k <= m. +// If side == blas.Right, A is an n×k matrix and 0 <= k <= n. +// The ith column of A contains the vector which defines the elementary +// reflector H_i and tau[i] contains its scalar factor. tau must have length k +// and Dormqr will panic otherwise. Dgeqrf returns A and tau in the required +// form. +// +// work must have length at least max(1,lwork), and lwork must be at least n if +// side == blas.Left and at least m if side == blas.Right, otherwise Dormqr will +// panic. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= m if side == blas.Left and lwork >= n if side == +// blas.Right, and this function will panic otherwise. Larger values of lwork +// will generally give better performance. On return, work[0] will contain the +// optimal value of lwork. +// +// If lwork is -1, instead of performing Dormqr, the optimal workspace size will +// be stored into work[0]. +func (impl Implementation) Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + left := side == blas.Left + nq := n + nw := m + if left { + nq = m + nw = n + } + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case lda < max(1, k): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + tsize = nbmax * ldt + ) + opts := string(side) + string(trans) + nb := min(nbmax, impl.Ilaenv(1, "DORMQR", opts, m, n, k, -1)) + lworkopt := max(1, nw)*nb + tsize + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (nq-1)*lda+k: + panic(shortA) + case len(tau) != k: + panic(badLenTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + } + + nbmin := 2 + if 1 < nb && nb < k { + if lwork < nw*nb+tsize { + nb = (lwork - tsize) / nw + nbmin = max(2, impl.Ilaenv(2, "DORMQR", opts, m, n, k, -1)) + } + } + + if nb < nbmin || k <= nb { + // Call unblocked code. 
+ impl.Dorm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work) + work[0] = float64(lworkopt) + return + } + + var ( + ldwork = nb + notrans = trans == blas.NoTrans + ) + switch { + case left && notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i*ldc:], ldc, + work[tsize:], ldwork) + } + + case left && !notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i*ldc:], ldc, + work[tsize:], ldwork) + } + + case !left && notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i:], ldc, + work[tsize:], ldwork) + } + + case !left && !notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i:], ldc, + work[tsize:], ldwork) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go new file mode 100644 index 00000000..bb03f32c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go @@ -0,0 +1,103 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dormr2 multiplies a general matrix C by an orthogonal matrix from a RQ factorization +// determined by Dgerqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of size k×m, and if side == blas.Right +// a is of size k×n. +// +// tau contains the Householder factors and is of length at least k and this function +// will panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right and this function will panic otherwise. +// +// Dormr2 is an internal routine. It is exported for testing purposes. 
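+//
+// A usage sketch (illustrative only), assuming a and tau were returned by
+// Dgerqf with a of size k×n for side == blas.Right, and c is an m×n matrix:
+//  work := make([]float64, m) // Length m is required for blas.Right.
+//  impl.Dormr2(blas.Right, blas.NoTrans, m, n, k, a, lda, tau, c, ldc, work)
+// overwrites c with C * Q.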
+func (impl Implementation) Dormr2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + left := side == blas.Left + nq := n + nw := m + if left { + nq = m + nw = n + } + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case lda < max(1, nq): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + return + } + + switch { + case len(a) < (k-1)*lda+nq: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(work) < nw: + panic(shortWork) + } + + if left { + if trans == blas.NoTrans { + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+(m-k+i)] + a[i*lda+(m-k+i)] = 1 + impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(m-k+i)] = aii + } + return + } + for i := 0; i < k; i++ { + aii := a[i*lda+(m-k+i)] + a[i*lda+(m-k+i)] = 1 + impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(m-k+i)] = aii + } + return + } + if trans == blas.NoTrans { + for i := 0; i < k; i++ { + aii := a[i*lda+(n-k+i)] + a[i*lda+(n-k+i)] = 1 + impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(n-k+i)] = aii + } + return + } + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+(n-k+i)] + a[i*lda+(n-k+i)] = 1 + impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(n-k+i)] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go new file mode 100644 index 00000000..a5beb80b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go @@ -0,0 +1,110 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpbtf2 computes the Cholesky factorization of a symmetric positive banded +// matrix ab. The matrix ab is n×n with kd diagonal bands. The Cholesky +// factorization computed is +// A = U^T * U if ul == blas.Upper +// A = L * L^T if ul == blas.Lower +// ul also specifies the storage of ab. If ul == blas.Upper, then +// ab is stored as an upper-triangular banded matrix with kd super-diagonals, +// and if ul == blas.Lower, ab is stored as a lower-triangular banded matrix +// with kd sub-diagonals. On exit, the banded matrix U or L is stored in-place +// into ab depending on the value of ul. Dpbtf2 returns whether the factorization +// was successfully completed. +// +// The band storage scheme is illustrated below when n = 6, and kd = 2. +// The resulting Cholesky decomposition is stored in the same elements as the +// input band matrix (a11 becomes u11 or l11, etc.). +// +// ul = blas.Upper +// a11 a12 a13 +// a22 a23 a24 +// a33 a34 a35 +// a44 a45 a46 +// a55 a56 * +// a66 * * +// +// ul = blas.Lower +// * * a11 +// * a21 a22 +// a31 a32 a33 +// a42 a43 a44 +// a53 a54 a55 +// a64 a65 a66 +// +// Dpbtf2 is the unblocked version of the algorithm, see Dpbtrf for the blocked +// version. +// +// Dpbtf2 is an internal routine, exported for testing purposes. 
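+//
+// For reference (derived from the layout illustrated above), a dense element
+// A[i][j] within the band is stored in ab at
+//  ab[i*ldab+j-i]    // ul == blas.Upper, 0 <= j-i <= kd
+//  ab[i*ldab+kd+j-i] // ul == blas.Lower, 0 <= i-j <= kd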
+func (Implementation) Dpbtf2(ul blas.Uplo, n, kd int, ab []float64, ldab int) (ok bool) { + switch { + case ul != blas.Upper && ul != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case kd < 0: + panic(kdLT0) + case ldab < kd+1: + panic(badLdA) + } + + if n == 0 { + return + } + + if len(ab) < (n-1)*ldab+kd { + panic(shortAB) + } + + bi := blas64.Implementation() + + kld := max(1, ldab-1) + if ul == blas.Upper { + for j := 0; j < n; j++ { + // Compute U(J,J) and test for non positive-definiteness. + ajj := ab[j*ldab] + if ajj <= 0 { + return false + } + ajj = math.Sqrt(ajj) + ab[j*ldab] = ajj + // Compute elements j+1:j+kn of row J and update the trailing submatrix + // within the band. + kn := min(kd, n-j-1) + if kn > 0 { + bi.Dscal(kn, 1/ajj, ab[j*ldab+1:], 1) + bi.Dsyr(blas.Upper, kn, -1, ab[j*ldab+1:], 1, ab[(j+1)*ldab:], kld) + } + } + return true + } + for j := 0; j < n; j++ { + // Compute L(J,J) and test for non positive-definiteness. + ajj := ab[j*ldab+kd] + if ajj <= 0 { + return false + } + ajj = math.Sqrt(ajj) + ab[j*ldab+kd] = ajj + + // Compute elements J+1:J+KN of column J and update the trailing submatrix + // within the band. + kn := min(kd, n-j-1) + if kn > 0 { + bi.Dscal(kn, 1/ajj, ab[(j+1)*ldab+kd-1:], kld) + bi.Dsyr(blas.Lower, kn, -1, ab[(j+1)*ldab+kd-1:], kld, ab[(j+1)*ldab+kd:], kld) + } + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go new file mode 100644 index 00000000..7af4c187 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go @@ -0,0 +1,90 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpocon estimates the reciprocal of the condition number of a positive-definite +// matrix A given the Cholesky decomposition of A. The condition number computed +// is based on the 1-norm and the ∞-norm. +// +// anorm is the 1-norm and the ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 3*n and Dpocon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dpocon will panic otherwise. +func (impl Implementation) Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case anorm < 0: + panic(negANorm) + } + + // Quick return if possible. 
+ if n == 0 { + return 1 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(work) < 3*n: + panic(shortWork) + case len(iwork) < n: + panic(shortIWork) + } + + if anorm == 0 { + return 0 + } + + bi := blas64.Implementation() + + var ( + smlnum = dlamchS + rcond float64 + sl, su float64 + normin bool + ainvnm float64 + kase int + isave [3]int + ) + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, &isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / ainvnm) / anorm + } + return rcond + } + if uplo == blas.Upper { + sl = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + normin = true + su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + } else { + sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + normin = true + su = impl.Dlatrs(blas.Lower, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + } + scale := sl * su + if scale != 1 { + ix := bi.Idamax(n, work, 1) + if scale == 0 || scale < math.Abs(work[ix])*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go new file mode 100644 index 00000000..5d3327c2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go @@ -0,0 +1,82 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotf2 computes the Cholesky decomposition of the symmetric positive definite +// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, +// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T +// is computed and stored in-place into a. If a is not positive definite, false +// is returned. This is the unblocked version of the algorithm. +// +// Dpotf2 is an internal routine. It is exported for testing purposes. +func (Implementation) Dpotf2(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { + switch { + case ul != blas.Upper && ul != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + + if ul == blas.Upper { + for j := 0; j < n; j++ { + ajj := a[j*lda+j] + if j != 0 { + ajj -= bi.Ddot(j, a[j:], lda, a[j:], lda) + } + if ajj <= 0 || math.IsNaN(ajj) { + a[j*lda+j] = ajj + return false + } + ajj = math.Sqrt(ajj) + a[j*lda+j] = ajj + if j < n-1 { + bi.Dgemv(blas.Trans, j, n-j-1, + -1, a[j+1:], lda, a[j:], lda, + 1, a[j*lda+j+1:], 1) + bi.Dscal(n-j-1, 1/ajj, a[j*lda+j+1:], 1) + } + } + return true + } + for j := 0; j < n; j++ { + ajj := a[j*lda+j] + if j != 0 { + ajj -= bi.Ddot(j, a[j*lda:], 1, a[j*lda:], 1) + } + if ajj <= 0 || math.IsNaN(ajj) { + a[j*lda+j] = ajj + return false + } + ajj = math.Sqrt(ajj) + a[j*lda+j] = ajj + if j < n-1 { + bi.Dgemv(blas.NoTrans, n-j-1, j, + -1, a[(j+1)*lda:], lda, a[j*lda:], 1, + 1, a[(j+1)*lda+j:], lda) + bi.Dscal(n-j-1, 1/ajj, a[(j+1)*lda+j:], lda) + } + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go new file mode 100644 index 00000000..21241687 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go @@ -0,0 +1,81 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotrf computes the Cholesky decomposition of the symmetric positive definite +// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, +// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T +// is computed and stored in-place into a. If a is not positive definite, false +// is returned. This is the blocked version of the algorithm. +func (impl Implementation) Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { + switch { + case ul != blas.Upper && ul != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + nb := impl.Ilaenv(1, "DPOTRF", string(ul), n, -1, -1, -1) + if nb <= 1 || n <= nb { + return impl.Dpotf2(ul, n, a, lda) + } + bi := blas64.Implementation() + if ul == blas.Upper { + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dsyrk(blas.Upper, blas.Trans, jb, j, + -1, a[j:], lda, + 1, a[j*lda+j:], lda) + ok = impl.Dpotf2(blas.Upper, jb, a[j*lda+j:], lda) + if !ok { + return ok + } + if j+jb < n { + bi.Dgemm(blas.Trans, blas.NoTrans, jb, n-j-jb, j, + -1, a[j:], lda, a[j+jb:], lda, + 1, a[j*lda+j+jb:], lda) + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, jb, n-j-jb, + 1, a[j*lda+j:], lda, + a[j*lda+j+jb:], lda) + } + } + return true + } + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dsyrk(blas.Lower, blas.NoTrans, jb, j, + -1, a[j*lda:], lda, + 1, a[j*lda+j:], lda) + ok := impl.Dpotf2(blas.Lower, jb, a[j*lda+j:], lda) + if !ok { + return ok + } + if j+jb < n { + bi.Dgemm(blas.NoTrans, blas.Trans, n-j-jb, jb, j, + -1, a[(j+jb)*lda:], lda, a[j*lda:], lda, + 1, a[(j+jb)*lda+j:], lda) + bi.Dtrsm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, n-j-jb, jb, + 1, a[j*lda+j:], lda, + a[(j+jb)*lda+j:], lda) + } + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go new file mode 100644 index 00000000..2394775c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go @@ -0,0 +1,44 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dpotri computes the inverse of a real symmetric positive definite matrix A +// using its Cholesky factorization. +// +// On entry, a contains the triangular factor U or L from the Cholesky +// factorization A = U^T*U or A = L*L^T, as computed by Dpotrf. +// On return, a contains the upper or lower triangle of the (symmetric) +// inverse of A, overwriting the input factor U or L. +func (impl Implementation) Dpotri(uplo blas.Uplo, n int, a []float64, lda int) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + // Invert the triangular Cholesky factor U or L. + ok = impl.Dtrtri(uplo, blas.NonUnit, n, a, lda) + if !ok { + return false + } + + // Form inv(U)*inv(U)^T or inv(L)^T*inv(L). + impl.Dlauum(uplo, n, a, lda) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go new file mode 100644 index 00000000..689e0439 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go @@ -0,0 +1,62 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotrs solves a system of n linear equations A*X = B where A is an n×n +// symmetric positive definite matrix and B is an n×nrhs matrix. The matrix A is +// represented by its Cholesky factorization +// A = U^T*U if uplo == blas.Upper +// A = L*L^T if uplo == blas.Lower +// as computed by Dpotrf. 
On entry, B contains the right-hand side matrix B, on +// return it contains the solution matrix X. +func (Implementation) Dpotrs(uplo blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + } + + // Quick return if possible. + if n == 0 || nrhs == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(b) < (n-1)*ldb+nrhs: + panic(shortB) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + // Solve U^T * U * X = B where U is stored in the upper triangle of A. + + // Solve U^T * X = B, overwriting B with X. + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + // Solve U * X = B, overwriting B with X. + bi.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + } else { + // Solve L * L^T * X = B where L is stored in the lower triangle of A. + + // Solve L * X = B, overwriting B with X. + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + // Solve L^T * X = B, overwriting B with X. + bi.Dtrsm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go b/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go new file mode 100644 index 00000000..b2772dbc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go @@ -0,0 +1,63 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Drscl multiplies the vector x by 1/a being careful to avoid overflow or +// underflow where possible. +// +// Drscl is an internal routine. It is exported for testing purposes. +func (impl Implementation) Drscl(n int, a float64, x []float64, incX int) { + switch { + case n < 0: + panic(nLT0) + case incX <= 0: + panic(badIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + if len(x) < 1+(n-1)*incX { + panic(shortX) + } + + bi := blas64.Implementation() + + cden := a + cnum := 1.0 + smlnum := dlamchS + bignum := 1 / smlnum + for { + cden1 := cden * smlnum + cnum1 := cnum / bignum + var mul float64 + var done bool + switch { + case cnum != 0 && math.Abs(cden1) > math.Abs(cnum): + mul = smlnum + done = false + cden = cden1 + case math.Abs(cnum1) > math.Abs(cden): + mul = bignum + done = false + cnum = cnum1 + default: + mul = cnum / cden + done = true + } + bi.Dscal(n, mul, x, incX) + if done { + break + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go new file mode 100644 index 00000000..d6c7861a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go @@ -0,0 +1,376 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dsteqr computes the eigenvalues and optionally the eigenvectors of a symmetric +// tridiagonal matrix using the implicit QL or QR method. 
The eigenvectors of a +// full or band symmetric matrix can also be found if Dsytrd, Dsptrd, or Dsbtrd +// have been used to reduce this matrix to tridiagonal form. +// +// d, on entry, contains the diagonal elements of the tridiagonal matrix. On exit, +// d contains the eigenvalues in ascending order. d must have length n and +// Dsteqr will panic otherwise. +// +// e, on entry, contains the off-diagonal elements of the tridiagonal matrix on +// entry, and is overwritten during the call to Dsteqr. e must have length n-1 and +// Dsteqr will panic otherwise. +// +// z, on entry, contains the n×n orthogonal matrix used in the reduction to +// tridiagonal form if compz == lapack.EVOrig. On exit, if +// compz == lapack.EVOrig, z contains the orthonormal eigenvectors of the +// original symmetric matrix, and if compz == lapack.EVTridiag, z contains the +// orthonormal eigenvectors of the symmetric tridiagonal matrix. z is not used +// if compz == lapack.EVCompNone. +// +// work must have length at least max(1, 2*n-2) if the eigenvectors are computed, +// and Dsteqr will panic otherwise. +// +// Dsteqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsteqr(compz lapack.EVComp, n int, d, e, z []float64, ldz int, work []float64) (ok bool) { + switch { + case compz != lapack.EVCompNone && compz != lapack.EVTridiag && compz != lapack.EVOrig: + panic(badEVComp) + case n < 0: + panic(nLT0) + case ldz < 1, compz != lapack.EVCompNone && ldz < n: + panic(badLdZ) + } + + // Quick return if possible. + if n == 0 { + return true + } + + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case compz != lapack.EVCompNone && len(z) < (n-1)*ldz+n: + panic(shortZ) + case compz != lapack.EVCompNone && len(work) < max(1, 2*n-2): + panic(shortWork) + } + + var icompz int + if compz == lapack.EVOrig { + icompz = 1 + } else if compz == lapack.EVTridiag { + icompz = 2 + } + + if n == 1 { + if icompz == 2 { + z[0] = 1 + } + return true + } + + bi := blas64.Implementation() + + eps := dlamchE + eps2 := eps * eps + safmin := dlamchS + safmax := 1 / safmin + ssfmax := math.Sqrt(safmax) / 3 + ssfmin := math.Sqrt(safmin) / eps2 + + // Compute the eigenvalues and eigenvectors of the tridiagonal matrix. + if icompz == 2 { + impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) + } + const maxit = 30 + nmaxit := n * maxit + + jtot := 0 + + // Determine where the matrix splits and choose QL or QR iteration for each + // block, according to whether top or bottom diagonal element is smaller. + l1 := 0 + nm1 := n - 1 + + type scaletype int + const ( + down scaletype = iota + 1 + up + ) + var iscale scaletype + + for { + if l1 > n-1 { + // Order eigenvalues and eigenvectors. + if icompz == 0 { + impl.Dlasrt(lapack.SortIncreasing, n, d) + } else { + // TODO(btracey): Consider replacing this sort with a call to sort.Sort. 
+ for ii := 1; ii < n; ii++ { + i := ii - 1 + k := i + p := d[i] + for j := ii; j < n; j++ { + if d[j] < p { + k = j + p = d[j] + } + } + if k != i { + d[k] = d[i] + d[i] = p + bi.Dswap(n, z[i:], ldz, z[k:], ldz) + } + } + } + return true + } + if l1 > 0 { + e[l1-1] = 0 + } + var m int + if l1 <= nm1 { + for m = l1; m < nm1; m++ { + test := math.Abs(e[m]) + if test == 0 { + break + } + if test <= (math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1])))*eps { + e[m] = 0 + break + } + } + } + l := l1 + lsv := l + lend := m + lendsv := lend + l1 = m + 1 + if lend == l { + continue + } + + // Scale submatrix in rows and columns L to Lend + anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) + switch { + case anorm == 0: + continue + case anorm > ssfmax: + iscale = down + // Pretend that d and e are matrices with 1 column. + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], 1) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], 1) + case anorm < ssfmin: + iscale = up + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], 1) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], 1) + } + + // Choose between QL and QR. + if math.Abs(d[lend]) < math.Abs(d[l]) { + lend = lsv + l = lendsv + } + if lend > l { + // QL Iteration. Look for small subdiagonal element. + for { + if l != lend { + for m = l; m < lend; m++ { + v := math.Abs(e[m]) + if v*v <= (eps2*math.Abs(d[m]))*math.Abs(d[m+1])+safmin { + break + } + } + } else { + m = lend + } + if m < lend { + e[m] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l++ + if l > lend { + break + } + continue + } + + // If remaining matrix is 2×2, use Dlae2 to compute its eigensystem. + if m == l+1 { + if icompz > 0 { + d[l], d[l+1], work[l], work[n-1+l] = impl.Dlaev2(d[l], e[l], d[l+1]) + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, + n, 2, work[l:], work[n-1+l:], z[l:], ldz) + } else { + d[l], d[l+1] = impl.Dlae2(d[l], e[l], d[l+1]) + } + e[l] = 0 + l += 2 + if l > lend { + break + } + continue + } + + if jtot == nmaxit { + break + } + jtot++ + + // Form shift + g := (d[l+1] - p) / (2 * e[l]) + r := impl.Dlapy2(g, 1) + g = d[m] - p + e[l]/(g+math.Copysign(r, g)) + s := 1.0 + c := 1.0 + p = 0.0 + + // Inner loop + for i := m - 1; i >= l; i-- { + f := s * e[i] + b := c * e[i] + c, s, r = impl.Dlartg(g, f) + if i != m-1 { + e[i+1] = r + } + g = d[i+1] - p + r = (d[i]-g)*s + 2*c*b + p = s * r + d[i+1] = g + p + g = c*r - b + + // If eigenvectors are desired, then save rotations. + if icompz > 0 { + work[i] = c + work[n-1+i] = -s + } + } + // If eigenvectors are desired, then apply saved rotations. + if icompz > 0 { + mm := m - l + 1 + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, + n, mm, work[l:], work[n-1+l:], z[l:], ldz) + } + d[l] -= p + e[l] = g + } + } else { + // QR Iteration. + // Look for small superdiagonal element. + for { + if l != lend { + for m = l; m > lend; m-- { + v := math.Abs(e[m-1]) + if v*v <= (eps2*math.Abs(d[m])*math.Abs(d[m-1]) + safmin) { + break + } + } + } else { + m = lend + } + if m > lend { + e[m-1] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found + l-- + if l < lend { + break + } + continue + } + + // If remaining matrix is 2×2, use Dlae2 to compute its eigenvalues. 
+ if m == l-1 { + if icompz > 0 { + d[l-1], d[l], work[m], work[n-1+m] = impl.Dlaev2(d[l-1], e[l-1], d[l]) + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, + n, 2, work[m:], work[n-1+m:], z[l-1:], ldz) + } else { + d[l-1], d[l] = impl.Dlae2(d[l-1], e[l-1], d[l]) + } + e[l-1] = 0 + l -= 2 + if l < lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + g := (d[l-1] - p) / (2 * e[l-1]) + r := impl.Dlapy2(g, 1) + g = d[m] - p + (e[l-1])/(g+math.Copysign(r, g)) + s := 1.0 + c := 1.0 + p = 0.0 + + // Inner loop. + for i := m; i < l; i++ { + f := s * e[i] + b := c * e[i] + c, s, r = impl.Dlartg(g, f) + if i != m { + e[i-1] = r + } + g = d[i] - p + r = (d[i+1]-g)*s + 2*c*b + p = s * r + d[i] = g + p + g = c*r - b + + // If eigenvectors are desired, then save rotations. + if icompz > 0 { + work[i] = c + work[n-1+i] = s + } + } + + // If eigenvectors are desired, then apply saved rotations. + if icompz > 0 { + mm := l - m + 1 + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, + n, mm, work[m:], work[n-1+m:], z[m:], ldz) + } + d[l] -= p + e[l-1] = g + } + } + + // Undo scaling if necessary. + switch iscale { + case down: + // Pretend that d and e are matrices with 1 column. + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], 1) + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv, 1, e[lsv:], 1) + case up: + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], 1) + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv, 1, e[lsv:], 1) + } + + // Check for no convergence to an eigenvalue after a total of n*maxit iterations. + if jtot >= nmaxit { + break + } + } + for i := 0; i < n-1; i++ { + if e[i] != 0 { + return false + } + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go new file mode 100644 index 00000000..dc1e178d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go @@ -0,0 +1,285 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dsterf computes all eigenvalues of a symmetric tridiagonal matrix using the +// Pal-Walker-Kahan variant of the QL or QR algorithm. +// +// d contains the diagonal elements of the tridiagonal matrix on entry, and +// contains the eigenvalues in ascending order on exit. d must have length at +// least n, or Dsterf will panic. +// +// e contains the off-diagonal elements of the tridiagonal matrix on entry, and is +// overwritten during the call to Dsterf. e must have length of at least n-1 or +// Dsterf will panic. +// +// Dsterf is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsterf(n int, d, e []float64) (ok bool) { + if n < 0 { + panic(nLT0) + } + + // Quick return if possible. + if n == 0 { + return true + } + + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + } + + if n == 1 { + return true + } + + const ( + none = 0 // The values are not scaled. + down = 1 // The values are scaled below ssfmax threshold. + up = 2 // The values are scaled below ssfmin threshold. + ) + + // Determine the unit roundoff for this environment. 
+ eps := dlamchE + eps2 := eps * eps + safmin := dlamchS + safmax := 1 / safmin + ssfmax := math.Sqrt(safmax) / 3 + ssfmin := math.Sqrt(safmin) / eps2 + + // Compute the eigenvalues of the tridiagonal matrix. + maxit := 30 + nmaxit := n * maxit + jtot := 0 + + l1 := 0 + + for { + if l1 > n-1 { + impl.Dlasrt(lapack.SortIncreasing, n, d) + return true + } + if l1 > 0 { + e[l1-1] = 0 + } + var m int + for m = l1; m < n-1; m++ { + if math.Abs(e[m]) <= math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1]))*eps { + e[m] = 0 + break + } + } + + l := l1 + lsv := l + lend := m + lendsv := lend + l1 = m + 1 + if lend == 0 { + continue + } + + // Scale submatrix in rows and columns l to lend. + anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) + iscale := none + if anorm == 0 { + continue + } + if anorm > ssfmax { + iscale = down + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], n) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], n) + } else if anorm < ssfmin { + iscale = up + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], n) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], n) + } + + el := e[l:lend] + for i, v := range el { + el[i] *= v + } + + // Choose between QL and QR iteration. + if math.Abs(d[lend]) < math.Abs(d[l]) { + lend = lsv + l = lendsv + } + if lend >= l { + // QL Iteration. + // Look for small sub-diagonal element. + for { + if l != lend { + for m = l; m < lend; m++ { + if math.Abs(e[m]) <= eps2*(math.Abs(d[m]*d[m+1])) { + break + } + } + } else { + m = lend + } + if m < lend { + e[m] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l++ + if l > lend { + break + } + continue + } + // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. + if m == l+1 { + d[l], d[l+1] = impl.Dlae2(d[l], math.Sqrt(e[l]), d[l+1]) + e[l] = 0 + l += 2 + if l > lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + rte := math.Sqrt(e[l]) + sigma := (d[l+1] - p) / (2 * rte) + r := impl.Dlapy2(sigma, 1) + sigma = p - (rte / (sigma + math.Copysign(r, sigma))) + + c := 1.0 + s := 0.0 + gamma := d[m] - sigma + p = gamma * gamma + + // Inner loop. + for i := m - 1; i >= l; i-- { + bb := e[i] + r := p + bb + if i != m-1 { + e[i+1] = s * r + } + oldc := c + c = p / r + s = bb / r + oldgam := gamma + alpha := d[i] + gamma = c*(alpha-sigma) - s*oldgam + d[i+1] = oldgam + (alpha - gamma) + if c != 0 { + p = (gamma * gamma) / c + } else { + p = oldc * bb + } + } + e[l] = s * p + d[l] = sigma + gamma + } + } else { + for { + // QR Iteration. + // Look for small super-diagonal element. + for m = l; m > lend; m-- { + if math.Abs(e[m-1]) <= eps2*math.Abs(d[m]*d[m-1]) { + break + } + } + if m > lend { + e[m-1] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l-- + if l < lend { + break + } + continue + } + + // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. + if m == l-1 { + d[l], d[l-1] = impl.Dlae2(d[l], math.Sqrt(e[l-1]), d[l-1]) + e[l-1] = 0 + l -= 2 + if l < lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + rte := math.Sqrt(e[l-1]) + sigma := (d[l-1] - p) / (2 * rte) + r := impl.Dlapy2(sigma, 1) + sigma = p - (rte / (sigma + math.Copysign(r, sigma))) + + c := 1.0 + s := 0.0 + gamma := d[m] - sigma + p = gamma * gamma + + // Inner loop. 
+ for i := m; i < l; i++ { + bb := e[i] + r := p + bb + if i != m { + e[i-1] = s * r + } + oldc := c + c = p / r + s = bb / r + oldgam := gamma + alpha := d[i+1] + gamma = c*(alpha-sigma) - s*oldgam + d[i] = oldgam + alpha - gamma + if c != 0 { + p = (gamma * gamma) / c + } else { + p = oldc * bb + } + } + e[l-1] = s * p + d[l] = sigma + gamma + } + } + + // Undo scaling if necessary + switch iscale { + case down: + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], n) + case up: + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], n) + } + + // Check for no convergence to an eigenvalue after a total of n*maxit iterations. + if jtot >= nmaxit { + break + } + } + for _, v := range e[:n-1] { + if v != 0 { + return false + } + } + impl.Dlasrt(lapack.SortIncreasing, n, d) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go new file mode 100644 index 00000000..5f57f3a5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go @@ -0,0 +1,130 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dsyev computes all eigenvalues and, optionally, the eigenvectors of a real +// symmetric matrix A. +// +// w contains the eigenvalues in ascending order upon return. w must have length +// at least n, and Dsyev will panic otherwise. +// +// On entry, a contains the elements of the symmetric matrix A in the triangular +// portion specified by uplo. If jobz == lapack.EVCompute, a contains the +// orthonormal eigenvectors of A on exit, otherwise jobz must be lapack.EVNone +// and on exit the specified triangular region is overwritten. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 3*n-1, and Dsyev will panic otherwise. The amount of blocking is +// limited by the usable length. If lwork == -1, instead of computing Dsyev the +// optimal work length is stored into work[0]. +func (impl Implementation) Dsyev(jobz lapack.EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool) { + switch { + case jobz != lapack.EVNone && jobz != lapack.EVCompute: + panic(badEVJob) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, 3*n-1) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + return true + } + + var opts string + if uplo == blas.Upper { + opts = "U" + } else { + opts = "L" + } + nb := impl.Ilaenv(1, "DSYTRD", opts, n, -1, -1, -1) + lworkopt := max(1, (nb+2)*n) + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(w) < n: + panic(shortW) + } + + if n == 1 { + w[0] = a[0] + work[0] = 2 + if jobz == lapack.EVCompute { + a[0] = 1 + } + return true + } + + safmin := dlamchS + eps := dlamchP + smlnum := safmin / eps + bignum := 1 / smlnum + rmin := math.Sqrt(smlnum) + rmax := math.Sqrt(bignum) + + // Scale matrix to allowable range, if necessary. 
+ anrm := impl.Dlansy(lapack.MaxAbs, uplo, n, a, lda, work) + scaled := false + var sigma float64 + if anrm > 0 && anrm < rmin { + scaled = true + sigma = rmin / anrm + } else if anrm > rmax { + scaled = true + sigma = rmax / anrm + } + if scaled { + kind := lapack.LowerTri + if uplo == blas.Upper { + kind = lapack.UpperTri + } + impl.Dlascl(kind, 0, 0, 1, sigma, n, n, a, lda) + } + var inde int + indtau := inde + n + indwork := indtau + n + llwork := lwork - indwork + impl.Dsytrd(uplo, n, a, lda, w, work[inde:], work[indtau:], work[indwork:], llwork) + + // For eigenvalues only, call Dsterf. For eigenvectors, first call Dorgtr + // to generate the orthogonal matrix, then call Dsteqr. + if jobz == lapack.EVNone { + ok = impl.Dsterf(n, w, work[inde:]) + } else { + impl.Dorgtr(uplo, n, a, lda, work[indtau:], work[indwork:], llwork) + ok = impl.Dsteqr(lapack.EVComp(jobz), n, w, work[inde:], a, lda, work[indtau:]) + } + if !ok { + return false + } + + // If the matrix was scaled, then rescale eigenvalues appropriately. + if scaled { + bi := blas64.Implementation() + bi.Dscal(n, 1/sigma, w, 1) + } + work[0] = float64(lworkopt) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go new file mode 100644 index 00000000..23cfd057 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go @@ -0,0 +1,136 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dsytd2 reduces a symmetric n×n matrix A to symmetric tridiagonal form T by +// an orthogonal similarity transformation +// Q^T * A * Q = T +// On entry, the matrix is contained in the specified triangle of a. On exit, +// if uplo == blas.Upper, the diagonal and first super-diagonal of a are +// overwritten with the elements of T. The elements above the first super-diagonal +// are overwritten with the elementary reflectors that are used with +// the elements written to tau in order to construct Q. If uplo == blas.Lower, +// the elements are written in the lower triangular region. +// +// d must have length at least n. e and tau must have length at least n-1. Dsytd2 +// will panic if these sizes are not met. +// +// Q is represented as a product of elementary reflectors. +// If uplo == blas.Upper +// Q = H_{n-2} * ... * H_1 * H_0 +// and if uplo == blas.Lower +// Q = H_0 * H_1 * ... * H_{n-2} +// where +// H_i = I - tau * v * v^T +// where tau is stored in tau[i], and v is stored in a. +// +// If uplo == blas.Upper, v[0:i-1] is stored in A[0:i-1,i+1], v[i] = 1, and +// v[i+1:] = 0. The elements of a are +// [ d e v2 v3 v4] +// [ d e v3 v4] +// [ d e v4] +// [ d e] +// [ d] +// If uplo == blas.Lower, v[0:i+1] = 0, v[i+1] = 1, and v[i+2:] is stored in +// A[i+2:n,i]. +// The elements of a are +// [ d ] +// [ e d ] +// [v1 e d ] +// [v1 v2 e d ] +// [v1 v2 v3 e d] +// +// Dsytd2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsytd2(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau []float64) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case len(tau) < n-1: + panic(shortTau) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + // Reduce the upper triangle of A. + for i := n - 2; i >= 0; i-- { + // Generate elementary reflector H_i = I - tau * v * v^T to + // annihilate A[i:i-1, i+1]. + var taui float64 + a[i*lda+i+1], taui = impl.Dlarfg(i+1, a[i*lda+i+1], a[i+1:], lda) + e[i] = a[i*lda+i+1] + if taui != 0 { + // Apply H_i from both sides to A[0:i,0:i]. + a[i*lda+i+1] = 1 + + // Compute x := tau * A * v storing x in tau[0:i]. + bi.Dsymv(uplo, i+1, taui, a, lda, a[i+1:], lda, 0, tau, 1) + + // Compute w := x - 1/2 * tau * (x^T * v) * v. + alpha := -0.5 * taui * bi.Ddot(i+1, tau, 1, a[i+1:], lda) + bi.Daxpy(i+1, alpha, a[i+1:], lda, tau, 1) + + // Apply the transformation as a rank-2 update + // A = A - v * w^T - w * v^T. + bi.Dsyr2(uplo, i+1, -1, a[i+1:], lda, tau, 1, a, lda) + a[i*lda+i+1] = e[i] + } + d[i+1] = a[(i+1)*lda+i+1] + tau[i] = taui + } + d[0] = a[0] + return + } + // Reduce the lower triangle of A. + for i := 0; i < n-1; i++ { + // Generate elementary reflector H_i = I - tau * v * v^T to + // annihilate A[i+2:n, i]. + var taui float64 + a[(i+1)*lda+i], taui = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + if taui != 0 { + // Apply H_i from both sides to A[i+1:n, i+1:n]. + a[(i+1)*lda+i] = 1 + + // Compute x := tau * A * v, storing y in tau[i:n-1]. + bi.Dsymv(uplo, n-i-1, taui, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, tau[i:], 1) + + // Compute w := x - 1/2 * tau * (x^T * v) * v. + alpha := -0.5 * taui * bi.Ddot(n-i-1, tau[i:], 1, a[(i+1)*lda+i:], lda) + bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda, tau[i:], 1) + + // Apply the transformation as a rank-2 update + // A = A - v * w^T - w * v^T. + bi.Dsyr2(uplo, n-i-1, -1, a[(i+1)*lda+i:], lda, tau[i:], 1, a[(i+1)*lda+i+1:], lda) + a[(i+1)*lda+i] = e[i] + } + d[i] = a[i*lda+i] + tau[i] = taui + } + d[n-1] = a[(n-1)*lda+n-1] +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go new file mode 100644 index 00000000..df47568e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go @@ -0,0 +1,172 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dsytrd reduces a symmetric n×n matrix A to symmetric tridiagonal form by an +// orthogonal similarity transformation +// Q^T * A * Q = T +// where Q is an orthonormal matrix and T is symmetric and tridiagonal. +// +// On entry, a contains the elements of the input matrix in the triangle specified +// by uplo. On exit, the diagonal and sub/super-diagonal are overwritten by the +// corresponding elements of the tridiagonal matrix T. The remaining elements in +// the triangle, along with the array tau, contain the data to construct Q as +// the product of elementary reflectors. +// +// If uplo == blas.Upper, Q is constructed with +// Q = H_{n-2} * ... * H_1 * H_0 +// where +// H_i = I - tau_i * v * v^T +// v is constructed as v[i+1:n] = 0, v[i] = 1, v[0:i-1] is stored in A[0:i-1, i+1]. 
+// The elements of A are +// [ d e v1 v2 v3] +// [ d e v2 v3] +// [ d e v3] +// [ d e] +// [ e] +// +// If uplo == blas.Lower, Q is constructed with +// Q = H_0 * H_1 * ... * H_{n-2} +// where +// H_i = I - tau_i * v * v^T +// v is constructed as v[0:i+1] = 0, v[i+1] = 1, v[i+2:n] is stored in A[i+2:n, i]. +// The elements of A are +// [ d ] +// [ e d ] +// [v0 e d ] +// [v0 v1 e d ] +// [v0 v1 v2 e d] +// +// d must have length n, and e and tau must have length n-1. Dsytrd will panic if +// these conditions are not met. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 1, and Dsytrd will panic otherwise. The amount of blocking is +// limited by the usable length. +// If lwork == -1, instead of computing Dsytrd the optimal work length is stored +// into work[0]. +// +// Dsytrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsytrd(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau, work []float64, lwork int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < 1 && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DSYTRD", string(uplo), n, -1, -1, -1) + lworkopt := n * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case len(tau) < n-1: + panic(shortTau) + } + + bi := blas64.Implementation() + + nx := n + iws := 1 + var ldwork int + if 1 < nb && nb < n { + // Determine when to cross over from blocked to unblocked code. The last + // block is always handled by unblocked code. + nx = max(nb, impl.Ilaenv(3, "DSYTRD", string(uplo), n, -1, -1, -1)) + if nx < n { + // Determine if workspace is large enough for blocked code. + ldwork = nb + iws = n * ldwork + if lwork < iws { + // Not enough workspace to use optimal nb: determine the minimum + // value of nb and reduce nb or force use of unblocked code by + // setting nx = n. + nb = max(lwork/n, 1) + nbmin := impl.Ilaenv(2, "DSYTRD", string(uplo), n, -1, -1, -1) + if nb < nbmin { + nx = n + } + } + } else { + nx = n + } + } else { + nb = 1 + } + ldwork = nb + + if uplo == blas.Upper { + // Reduce the upper triangle of A. Columns 0:kk are handled by the + // unblocked method. + var i int + kk := n - ((n-nx+nb-1)/nb)*nb + for i = n - nb; i >= kk; i -= nb { + // Reduce columns i:i+nb to tridiagonal form and form the matrix W + // which is needed to update the unreduced part of the matrix. + impl.Dlatrd(uplo, i+nb, nb, a, lda, e, tau, work, ldwork) + + // Update the unreduced submatrix A[0:i-1,0:i-1], using an update + // of the form A = A - V*W^T - W*V^T. + bi.Dsyr2k(uplo, blas.NoTrans, i, nb, -1, a[i:], lda, work, ldwork, 1, a, lda) + + // Copy superdiagonal elements back into A, and diagonal elements into D. + for j := i; j < i+nb; j++ { + a[(j-1)*lda+j] = e[j-1] + d[j] = a[j*lda+j] + } + } + // Use unblocked code to reduce the last or only block + // check that i == kk. + impl.Dsytd2(uplo, kk, a, lda, d, e, tau) + } else { + var i int + // Reduce the lower triangle of A. + for i = 0; i < n-nx; i += nb { + // Reduce columns 0:i+nb to tridiagonal form and form the matrix W + // which is needed to update the unreduced part of the matrix. 
+ impl.Dlatrd(uplo, n-i, nb, a[i*lda+i:], lda, e[i:], tau[i:], work, ldwork) + + // Update the unreduced submatrix A[i+ib:n, i+ib:n], using an update + // of the form A = A + V*W^T - W*V^T. + bi.Dsyr2k(uplo, blas.NoTrans, n-i-nb, nb, -1, a[(i+nb)*lda+i:], lda, + work[nb*ldwork:], ldwork, 1, a[(i+nb)*lda+i+nb:], lda) + + // Copy subdiagonal elements back into A, and diagonal elements into D. + for j := i; j < i+nb; j++ { + a[(j+1)*lda+j] = e[j] + d[j] = a[j*lda+j] + } + } + // Use unblocked code to reduce the last or only block. + impl.Dsytd2(uplo, n-i, a[i*lda+i:], lda, d[i:], e[i:], tau[i:]) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go new file mode 100644 index 00000000..d70d1962 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go @@ -0,0 +1,373 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtgsja computes the generalized singular value decomposition (GSVD) +// of two real upper triangular or trapezoidal matrices A and B. +// +// A and B have the following forms, which may be obtained by the +// preprocessing subroutine Dggsvp from a general m×n matrix A and p×n +// matrix B: +// +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l >= 0; +// l [ 0 0 A23 ] +// m-k-l [ 0 0 0 ] +// +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l < 0; +// m-k [ 0 0 A23 ] +// +// n-k-l k l +// B = l [ 0 0 B13 ] +// p-l [ 0 0 0 ] +// +// where the k×k matrix A12 and l×l matrix B13 are non-singular +// upper triangular. A23 is l×l upper triangular if m-k-l >= 0, +// otherwise A23 is (m-k)×l upper trapezoidal. +// +// On exit, +// +// U^T*A*Q = D1*[ 0 R ], V^T*B*Q = D2*[ 0 R ], +// +// where U, V and Q are orthogonal matrices. +// R is a non-singular upper triangular matrix, and D1 and D2 are +// diagonal matrices, which are of the following structures: +// +// If m-k-l >= 0, +// +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] +// +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] +// +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l +// +// where +// +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. +// +// R is stored in +// A[0:k+l, n-k-l:n] +// on exit. +// +// If m-k-l < 0, +// +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] +// +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] +// +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] +// +// where +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[0:m, n-k-l:n] +// [ 0 R22 R23 ] +// and R33 is stored in +// B[m-k:l, n+m-k-l:n] on exit. +// +// The computation of the orthogonal transformation matrices U, V or Q +// is optional. These matrices may either be formed explicitly, or they +// may be post-multiplied into input matrices U1, V1, or Q1. +// +// Dtgsja essentially uses a variant of Kogbetliantz algorithm to reduce +// min(l,m-k)×l triangular or trapezoidal matrix A23 and l×l +// matrix B13 to the form: +// +// U1^T*A13*Q1 = C1*R1; V1^T*B13*Q1 = S1*R1, +// +// where U1, V1 and Q1 are orthogonal matrices. 
C1 and S1 are diagonal +// matrices satisfying +// +// C1^2 + S1^2 = I, +// +// and R1 is an l×l non-singular upper triangular matrix. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDUnit Use unit-initialized matrix +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// k and l specify the sub-blocks in the input matrices A and B: +// A23 = A[k:min(k+l,m), n-l:n) and B13 = B[0:l, n-l:n] +// of A and B, whose GSVD is going to be computed by Dtgsja. +// +// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz +// iteration procedure. Generally, they are the same as used in the preprocessing +// step, for example, +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps, +// where eps is the machine epsilon. +// +// work must have length at least 2*n, otherwise Dtgsja will panic. +// +// alpha and beta must have length n or Dtgsja will panic. On exit, alpha and +// beta contain the generalized singular value pairs of A and B +// alpha[0:k] = 1, +// beta[0:k] = 0, +// if m-k-l >= 0, +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// if m-k-l < 0, +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// if k+l < n, +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. +// +// On exit, A[n-k:n, 0:min(k+l,m)] contains the triangular matrix R or part of R +// and if necessary, B[m-k:l, n+m-k-l:n] contains a part of R. +// +// Dtgsja returns whether the routine converged and the number of iteration cycles +// that were run. +// +// Dtgsja is an internal routine. It is exported for testing purposes. 
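+//
+// A minimal call sketch with identity-initialized orthogonal factors and a
+// caller-allocated workspace (the slice names are illustrative only):
+//  work := make([]float64, 2*n)
+//  cycles, ok := impl.Dtgsja(lapack.GSVDUnit, lapack.GSVDUnit, lapack.GSVDUnit,
+//  	m, p, n, k, l, a, lda, b, ldb, tola, tolb,
+//  	alpha, beta, u, ldu, v, ldv, q, ldq, work)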
+func (impl Implementation) Dtgsja(jobU, jobV, jobQ lapack.GSVDJob, m, p, n, k, l int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64) (cycles int, ok bool) { + const maxit = 40 + + initu := jobU == lapack.GSVDUnit + wantu := initu || jobU == lapack.GSVDU + + initv := jobV == lapack.GSVDUnit + wantv := initv || jobV == lapack.GSVDV + + initq := jobQ == lapack.GSVDUnit + wantq := initq || jobQ == lapack.GSVDQ + + switch { + case !initu && !wantu && jobU != lapack.GSVDNone: + panic(badGSVDJob + "U") + case !initv && !wantv && jobV != lapack.GSVDNone: + panic(badGSVDJob + "V") + case !initq && !wantq && jobQ != lapack.GSVDNone: + panic(badGSVDJob + "Q") + case m < 0: + panic(mLT0) + case p < 0: + panic(pLT0) + case n < 0: + panic(nLT0) + + case lda < max(1, n): + panic(badLdA) + case len(a) < (m-1)*lda+n: + panic(shortA) + + case ldb < max(1, n): + panic(badLdB) + case len(b) < (p-1)*ldb+n: + panic(shortB) + + case len(alpha) != n: + panic(badLenAlpha) + case len(beta) != n: + panic(badLenBeta) + + case ldu < 1, wantu && ldu < m: + panic(badLdU) + case wantu && len(u) < (m-1)*ldu+m: + panic(shortU) + + case ldv < 1, wantv && ldv < p: + panic(badLdV) + case wantv && len(v) < (p-1)*ldv+p: + panic(shortV) + + case ldq < 1, wantq && ldq < n: + panic(badLdQ) + case wantq && len(q) < (n-1)*ldq+n: + panic(shortQ) + + case len(work) < 2*n: + panic(shortWork) + } + + // Initialize U, V and Q, if necessary + if initu { + impl.Dlaset(blas.All, m, m, 0, 1, u, ldu) + } + if initv { + impl.Dlaset(blas.All, p, p, 0, 1, v, ldv) + } + if initq { + impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) + } + + bi := blas64.Implementation() + minTol := math.Min(tola, tolb) + + // Loop until convergence. + upper := false + for cycles = 1; cycles <= maxit; cycles++ { + upper = !upper + + for i := 0; i < l-1; i++ { + for j := i + 1; j < l; j++ { + var a1, a2, a3 float64 + if k+i < m { + a1 = a[(k+i)*lda+n-l+i] + } + if k+j < m { + a3 = a[(k+j)*lda+n-l+j] + } + + b1 := b[i*ldb+n-l+i] + b3 := b[j*ldb+n-l+j] + + var b2 float64 + if upper { + if k+i < m { + a2 = a[(k+i)*lda+n-l+j] + } + b2 = b[i*ldb+n-l+j] + } else { + if k+j < m { + a2 = a[(k+j)*lda+n-l+i] + } + b2 = b[j*ldb+n-l+i] + } + + csu, snu, csv, snv, csq, snq := impl.Dlags2(upper, a1, a2, a3, b1, b2, b3) + + // Update (k+i)-th and (k+j)-th rows of matrix A: U^T*A. + if k+j < m { + bi.Drot(l, a[(k+j)*lda+n-l:], 1, a[(k+i)*lda+n-l:], 1, csu, snu) + } + + // Update i-th and j-th rows of matrix B: V^T*B. + bi.Drot(l, b[j*ldb+n-l:], 1, b[i*ldb+n-l:], 1, csv, snv) + + // Update (n-l+i)-th and (n-l+j)-th columns of matrices + // A and B: A*Q and B*Q. + bi.Drot(min(k+l, m), a[n-l+j:], lda, a[n-l+i:], lda, csq, snq) + bi.Drot(l, b[n-l+j:], ldb, b[n-l+i:], ldb, csq, snq) + + if upper { + if k+i < m { + a[(k+i)*lda+n-l+j] = 0 + } + b[i*ldb+n-l+j] = 0 + } else { + if k+j < m { + a[(k+j)*lda+n-l+i] = 0 + } + b[j*ldb+n-l+i] = 0 + } + + // Update orthogonal matrices U, V, Q, if desired. + if wantu && k+j < m { + bi.Drot(m, u[k+j:], ldu, u[k+i:], ldu, csu, snu) + } + if wantv { + bi.Drot(p, v[j:], ldv, v[i:], ldv, csv, snv) + } + if wantq { + bi.Drot(n, q[n-l+j:], ldq, q[n-l+i:], ldq, csq, snq) + } + } + } + + if !upper { + // The matrices A13 and B13 were lower triangular at the start + // of the cycle, and are now upper triangular. + // + // Convergence test: test the parallelism of the corresponding + // rows of A and B. 
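+			// The parallelism of each row pair is measured by the smallest
+			// singular value returned by Dlapll, which is zero exactly when
+			// the two rows are linearly dependent.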
+ var error float64 + for i := 0; i < min(l, m-k); i++ { + bi.Dcopy(l-i, a[(k+i)*lda+n-l+i:], 1, work, 1) + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, work[l:], 1) + ssmin := impl.Dlapll(l-i, work, 1, work[l:], 1) + error = math.Max(error, ssmin) + } + if math.Abs(error) <= minTol { + // The algorithm has converged. + // Compute the generalized singular value pairs (alpha, beta) + // and set the triangular matrix R to array A. + for i := 0; i < k; i++ { + alpha[i] = 1 + beta[i] = 0 + } + + for i := 0; i < min(l, m-k); i++ { + a1 := a[(k+i)*lda+n-l+i] + b1 := b[i*ldb+n-l+i] + + if a1 != 0 { + gamma := b1 / a1 + + // Change sign if necessary. + if gamma < 0 { + bi.Dscal(l-i, -1, b[i*ldb+n-l+i:], 1) + if wantv { + bi.Dscal(p, -1, v[i:], ldv) + } + } + beta[k+i], alpha[k+i], _ = impl.Dlartg(math.Abs(gamma), 1) + + if alpha[k+i] >= beta[k+i] { + bi.Dscal(l-i, 1/alpha[k+i], a[(k+i)*lda+n-l+i:], 1) + } else { + bi.Dscal(l-i, 1/beta[k+i], b[i*ldb+n-l+i:], 1) + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) + } + } else { + alpha[k+i] = 0 + beta[k+i] = 1 + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) + } + } + + for i := m; i < k+l; i++ { + alpha[i] = 0 + beta[i] = 1 + } + if k+l < n { + for i := k + l; i < n; i++ { + alpha[i] = 0 + beta[i] = 0 + } + } + + return cycles, true + } + } + } + + // The algorithm has not converged after maxit cycles. + return cycles, false +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go new file mode 100644 index 00000000..899c95dd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go @@ -0,0 +1,90 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtrcon estimates the reciprocal of the condition number of a triangular matrix A. +// The condition number computed may be based on the 1-norm or the ∞-norm. +// +// work is a temporary data slice of length at least 3*n and Dtrcon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dtrcon will panic otherwise. 
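+//
+// A minimal calling sketch with caller-allocated workspaces (the names are
+// illustrative only):
+//  work := make([]float64, 3*n)
+//  iwork := make([]int, n)
+//  rcond := impl.Dtrcon(lapack.MaxColumnSum, blas.Upper, blas.NonUnit, n, a, lda, work, iwork)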
+func (impl Implementation) Dtrcon(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64 { + switch { + case norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum: + panic(badNorm) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return 1 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(work) < 3*n: + panic(shortWork) + case len(iwork) < n: + panic(shortIWork) + } + + bi := blas64.Implementation() + + var rcond float64 + smlnum := dlamchS * float64(n) + + anorm := impl.Dlantr(norm, uplo, diag, n, n, a, lda, work) + + if anorm <= 0 { + return rcond + } + var ainvnm float64 + var normin bool + kase1 := 2 + if norm == lapack.MaxColumnSum { + kase1 = 1 + } + var kase int + isave := new([3]int) + var scale float64 + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / anorm) / ainvnm + } + return rcond + } + if kase == kase1 { + scale = impl.Dlatrs(uplo, blas.NoTrans, diag, normin, n, a, lda, work, work[2*n:]) + } else { + scale = impl.Dlatrs(uplo, blas.Trans, diag, normin, n, a, lda, work, work[2*n:]) + } + normin = true + if scale != 1 { + ix := bi.Idamax(n, work, 1) + xnorm := math.Abs(work[ix]) + if scale == 0 || scale < xnorm*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go new file mode 100644 index 00000000..17121b8d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go @@ -0,0 +1,885 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtrevc3 computes some or all of the right and/or left eigenvectors of an n×n +// upper quasi-triangular matrix T in Schur canonical form. Matrices of this +// type are produced by the Schur factorization of a real general matrix A +// A = Q T Q^T, +// as computed by Dhseqr. +// +// The right eigenvector x of T corresponding to an +// eigenvalue λ is defined by +// T x = λ x, +// and the left eigenvector y is defined by +// y^T T = λ y^T. +// +// The eigenvalues are read directly from the diagonal blocks of T. +// +// This routine returns the matrices X and/or Y of right and left eigenvectors +// of T, or the products Q*X and/or Q*Y, where Q is an input matrix. If Q is the +// orthogonal factor that reduces a matrix A to Schur form T, then Q*X and Q*Y +// are the matrices of right and left eigenvectors of A. +// +// If side == lapack.EVRight, only right eigenvectors will be computed. +// If side == lapack.EVLeft, only left eigenvectors will be computed. +// If side == lapack.EVBoth, both right and left eigenvectors will be computed. +// For other values of side, Dtrevc3 will panic. +// +// If howmny == lapack.EVAll, all right and/or left eigenvectors will be +// computed. +// If howmny == lapack.EVAllMulQ, all right and/or left eigenvectors will be +// computed and multiplied from left by the matrices in VR and/or VL. 
+// If howmny == lapack.EVSelected, right and/or left eigenvectors will be +// computed as indicated by selected. +// For other values of howmny, Dtrevc3 will panic. +// +// selected specifies which eigenvectors will be computed. It must have length n +// if howmny == lapack.EVSelected, and it is not referenced otherwise. +// If w_j is a real eigenvalue, the corresponding real eigenvector will be +// computed if selected[j] is true. +// If w_j and w_{j+1} are the real and imaginary parts of a complex eigenvalue, +// the corresponding complex eigenvector is computed if either selected[j] or +// selected[j+1] is true, and on return selected[j] will be set to true and +// selected[j+1] will be set to false. +// +// VL and VR are n×mm matrices. If howmny is lapack.EVAll or +// lapack.AllEVMulQ, mm must be at least n. If howmny is +// lapack.EVSelected, mm must be large enough to store the selected +// eigenvectors. Each selected real eigenvector occupies one column and each +// selected complex eigenvector occupies two columns. If mm is not sufficiently +// large, Dtrevc3 will panic. +// +// On entry, if howmny is lapack.EVAllMulQ, it is assumed that VL (if side +// is lapack.EVLeft or lapack.EVBoth) contains an n×n matrix QL, +// and that VR (if side is lapack.EVLeft or lapack.EVBoth) contains +// an n×n matrix QR. QL and QR are typically the orthogonal matrix Q of Schur +// vectors returned by Dhseqr. +// +// On return, if side is lapack.EVLeft or lapack.EVBoth, +// VL will contain: +// if howmny == lapack.EVAll, the matrix Y of left eigenvectors of T, +// if howmny == lapack.EVAllMulQ, the matrix Q*Y, +// if howmny == lapack.EVSelected, the left eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VL, in the same order as their +// eigenvalues. +// VL is not referenced if side == lapack.EVRight. +// +// On return, if side is lapack.EVRight or lapack.EVBoth, +// VR will contain: +// if howmny == lapack.EVAll, the matrix X of right eigenvectors of T, +// if howmny == lapack.EVAllMulQ, the matrix Q*X, +// if howmny == lapack.EVSelected, the left eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VR, in the same order as their +// eigenvalues. +// VR is not referenced if side == lapack.EVLeft. +// +// Complex eigenvectors corresponding to a complex eigenvalue are stored in VL +// and VR in two consecutive columns, the first holding the real part, and the +// second the imaginary part. +// +// Each eigenvector will be normalized so that the element of largest magnitude +// has magnitude 1. Here the magnitude of a complex number (x,y) is taken to be +// |x| + |y|. +// +// work must have length at least lwork and lwork must be at least max(1,3*n), +// otherwise Dtrevc3 will panic. For optimum performance, lwork should be at +// least n+2*n*nb, where nb is the optimal blocksize. +// +// If lwork == -1, instead of performing Dtrevc3, the function only estimates +// the optimal workspace size based on n and stores it into work[0]. +// +// Dtrevc3 returns the number of columns in VL and/or VR actually used to store +// the eigenvectors. +// +// Dtrevc3 is an internal routine. It is exported for testing purposes. 
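+//
+// A workspace-query sketch (illustrative only; t, vl and vr must already be
+// valid n×n matrices):
+//  var opt [1]float64
+//  impl.Dtrevc3(lapack.EVBoth, lapack.EVAllMulQ, nil, n, t, ldt, vl, ldvl, vr, ldvr, n, opt[:], -1)
+//  work := make([]float64, int(opt[0]))
+//  m := impl.Dtrevc3(lapack.EVBoth, lapack.EVAllMulQ, nil, n, t, ldt, vl, ldvl, vr, ldvr, n, work, len(work))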
+func (impl Implementation) Dtrevc3(side lapack.EVSide, howmny lapack.EVHowMany, selected []bool, n int, t []float64, ldt int, vl []float64, ldvl int, vr []float64, ldvr int, mm int, work []float64, lwork int) (m int) { + bothv := side == lapack.EVBoth + rightv := side == lapack.EVRight || bothv + leftv := side == lapack.EVLeft || bothv + switch { + case !rightv && !leftv: + panic(badEVSide) + case howmny != lapack.EVAll && howmny != lapack.EVAllMulQ && howmny != lapack.EVSelected: + panic(badEVHowMany) + case n < 0: + panic(nLT0) + case ldt < max(1, n): + panic(badLdT) + case mm < 0: + panic(mmLT0) + case ldvl < 1: + // ldvl and ldvr are also checked below after the computation of + // m (number of columns of VL and VR) in case of howmny == EVSelected. + panic(badLdVL) + case ldvr < 1: + panic(badLdVR) + case lwork < max(1, 3*n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return 0 + } + + // Normally we don't check slice lengths until after the workspace + // query. However, even in case of the workspace query we need to + // compute and return the value of m, and since the computation accesses t, + // we put the length check of t here. + if len(t) < (n-1)*ldt+n { + panic(shortT) + } + + if howmny == lapack.EVSelected { + if len(selected) != n { + panic(badLenSelected) + } + // Set m to the number of columns required to store the selected + // eigenvectors, and standardize the slice selected. + // Each selected real eigenvector occupies one column and each + // selected complex eigenvector occupies two columns. + for j := 0; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // Diagonal 1×1 block corresponding to a + // real eigenvalue. + if selected[j] { + m++ + } + j++ + } else { + // Diagonal 2×2 block corresponding to a + // complex eigenvalue. + if selected[j] || selected[j+1] { + selected[j] = true + selected[j+1] = false + m += 2 + } + j += 2 + } + } + } else { + m = n + } + if mm < m { + panic(badMm) + } + + // Quick return in case of a workspace query. + nb := impl.Ilaenv(1, "DTREVC", string(side)+string(howmny), n, -1, -1, -1) + if lwork == -1 { + work[0] = float64(n + 2*n*nb) + return m + } + + // Quick return if no eigenvectors were selected. + if m == 0 { + return 0 + } + + switch { + case leftv && ldvl < mm: + panic(badLdVL) + case leftv && len(vl) < (n-1)*ldvl+mm: + panic(shortVL) + + case rightv && ldvr < mm: + panic(badLdVR) + case rightv && len(vr) < (n-1)*ldvr+mm: + panic(shortVR) + } + + // Use blocked version of back-transformation if sufficient workspace. + // Zero-out the workspace to avoid potential NaN propagation. + const ( + nbmin = 8 + nbmax = 128 + ) + if howmny == lapack.EVAllMulQ && lwork >= n+2*n*nbmin { + nb = min((lwork-n)/(2*n), nbmax) + impl.Dlaset(blas.All, n, 1+2*nb, 0, 0, work[:n+2*nb*n], 1+2*nb) + } else { + nb = 1 + } + + // Set the constants to control overflow. + ulp := dlamchP + smlnum := float64(n) / ulp * dlamchS + bignum := (1 - ulp) / smlnum + + // Split work into a vector of column norms and an n×2*nb matrix b. + norms := work[:n] + ldb := 2 * nb + b := work[n : n+n*ldb] + + // Compute 1-norm of each column of strictly upper triangular part of T + // to control overflow in triangular solver. + norms[0] = 0 + for j := 1; j < n; j++ { + var cn float64 + for i := 0; i < j; i++ { + cn += math.Abs(t[i*ldt+j]) + } + norms[j] = cn + } + + bi := blas64.Implementation() + + var ( + x [4]float64 + + iv int // Index of column in current block. 
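+		// is is the index of the column of VL or VR that receives the
+		// current eigenvector when no back-transform is applied.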
+ is int + + // ip is used below to specify the real or complex eigenvalue: + // ip == 0, real eigenvalue, + // 1, first of conjugate complex pair (wr,wi), + // -1, second of conjugate complex pair (wr,wi). + ip int + iscomplex [nbmax]int // Stores ip for each column in current block. + ) + + if side == lapack.EVLeft { + goto leftev + } + + // Compute right eigenvectors. + + // For complex right vector, iv-1 is for real part and iv for complex + // part. Non-blocked version always uses iv=1, blocked version starts + // with iv=nb-1 and goes down to 0 or 1. + iv = max(2, nb) - 1 + ip = 0 + is = m - 1 + for ki := n - 1; ki >= 0; ki-- { + if ip == -1 { + // Previous iteration (ki+1) was second of + // conjugate pair, so this ki is first of + // conjugate pair. + ip = 1 + continue + } + + if ki == 0 || t[ki*ldt+ki-1] == 0 { + // Last column or zero on sub-diagonal, so this + // ki must be real eigenvalue. + ip = 0 + } else { + // Non-zero on sub-diagonal, so this ki is + // second of conjugate pair. + ip = -1 + } + + if howmny == lapack.EVSelected { + if ip == 0 { + if !selected[ki] { + continue + } + } else if !selected[ki-1] { + continue + } + } + + // Compute the ki-th eigenvalue (wr,wi). + wr := t[ki*ldt+ki] + var wi float64 + if ip != 0 { + wi = math.Sqrt(math.Abs(t[ki*ldt+ki-1])) * math.Sqrt(math.Abs(t[(ki-1)*ldt+ki])) + } + smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) + + if ip == 0 { + // Real right eigenvector. + + b[ki*ldb+iv] = 1 + // Form right-hand side. + for k := 0; k < ki; k++ { + b[k*ldb+iv] = -t[k*ldt+ki] + } + // Solve upper quasi-triangular system: + // [ T[0:ki,0:ki] - wr ]*X = scale*b. + for j := ki - 1; j >= 0; { + if j == 0 || t[j*ldt+j-1] == 0 { + // 1×1 diagonal block. + scale, xnorm, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) + // Scale X[0,0] to avoid overflow when updating the + // right-hand side. + if xnorm > 1 && norms[j] > bignum/xnorm { + x[0] /= xnorm + scale /= xnorm + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[j*ldb+iv] = x[0] + // Update right-hand side. + bi.Daxpy(j, -x[0], t[j:], ldt, b[iv:], ldb) + j-- + } else { + // 2×2 diagonal block. + scale, xnorm, _ := impl.Dlaln2(false, 2, 1, smin, 1, t[(j-1)*ldt+j-1:], ldt, + 1, 1, b[(j-1)*ldb+iv:], ldb, wr, 0, x[:3], 2) + // Scale X[0,0] and X[1,0] to avoid overflow + // when updating the right-hand side. + if xnorm > 1 { + beta := math.Max(norms[j-1], norms[j]) + if beta > bignum/xnorm { + x[0] /= xnorm + x[2] /= xnorm + scale /= xnorm + } + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[(j-1)*ldb+iv] = x[0] + b[j*ldb+iv] = x[2] + // Update right-hand side. + bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv:], ldb) + bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv:], ldb) + j -= 2 + } + } + // Copy the vector x or Q*x to VR and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VR and normalize. + bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) + ii := bi.Idamax(ki+1, vr[is:], ldvr) + remax := 1 / math.Abs(vr[ii*ldvr+is]) + bi.Dscal(ki+1, remax, vr[is:], ldvr) + for k := ki + 1; k < n; k++ { + vr[k*ldvr+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. 
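+				// With howmny == lapack.EVAllMulQ the leading ki+1 columns of
+				// VR still hold Q, so the Dgemv below overwrites column ki
+				// with Q[:,0:ki]*x[0:ki] + x[ki]*Q[:,ki].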
+ if ki > 0 { + bi.Dgemv(blas.NoTrans, n, ki, 1, vr, ldvr, b[iv:], ldb, + b[ki*ldb+iv], vr[ki:], ldvr) + } + ii := bi.Idamax(n, vr[ki:], ldvr) + remax := 1 / math.Abs(vr[ii*ldvr+ki]) + bi.Dscal(n, remax, vr[ki:], ldvr) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out below vector. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv] = 0 + } + iscomplex[iv] = ip + // Back-transform and normalization is done below. + } + } else { + // Complex right eigenvector. + + // Initial solve + // [ ( T[ki-1,ki-1] T[ki-1,ki] ) - (wr + i*wi) ]*X = 0. + // [ ( T[ki, ki-1] T[ki, ki] ) ] + if math.Abs(t[(ki-1)*ldt+ki]) >= math.Abs(t[ki*ldt+ki-1]) { + b[(ki-1)*ldb+iv-1] = 1 + b[ki*ldb+iv] = wi / t[(ki-1)*ldt+ki] + } else { + b[(ki-1)*ldb+iv-1] = -wi / t[ki*ldt+ki-1] + b[ki*ldb+iv] = 1 + } + b[ki*ldb+iv-1] = 0 + b[(ki-1)*ldb+iv] = 0 + // Form right-hand side. + for k := 0; k < ki-1; k++ { + b[k*ldb+iv-1] = -b[(ki-1)*ldb+iv-1] * t[k*ldt+ki-1] + b[k*ldb+iv] = -b[ki*ldb+iv] * t[k*ldt+ki] + } + // Solve upper quasi-triangular system: + // [ T[0:ki-1,0:ki-1] - (wr+i*wi) ]*X = scale*(b1+i*b2) + for j := ki - 2; j >= 0; { + if j == 0 || t[j*ldt+j-1] == 0 { + // 1×1 diagonal block. + + scale, xnorm, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv-1:], ldb, wr, wi, x[:2], 2) + // Scale X[0,0] and X[0,1] to avoid + // overflow when updating the right-hand side. + if xnorm > 1 && norms[j] > bignum/xnorm { + x[0] /= xnorm + x[1] /= xnorm + scale /= xnorm + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv-1:], ldb) + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[j*ldb+iv-1] = x[0] + b[j*ldb+iv] = x[1] + // Update the right-hand side. + bi.Daxpy(j, -x[0], t[j:], ldt, b[iv-1:], ldb) + bi.Daxpy(j, -x[1], t[j:], ldt, b[iv:], ldb) + j-- + } else { + // 2×2 diagonal block. + + scale, xnorm, _ := impl.Dlaln2(false, 2, 2, smin, 1, t[(j-1)*ldt+j-1:], ldt, + 1, 1, b[(j-1)*ldb+iv-1:], ldb, wr, wi, x[:], 2) + // Scale X to avoid overflow when updating + // the right-hand side. + if xnorm > 1 { + beta := math.Max(norms[j-1], norms[j]) + if beta > bignum/xnorm { + rec := 1 / xnorm + x[0] *= rec + x[1] *= rec + x[2] *= rec + x[3] *= rec + scale *= rec + } + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv-1:], ldb) + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[(j-1)*ldb+iv-1] = x[0] + b[(j-1)*ldb+iv] = x[1] + b[j*ldb+iv-1] = x[2] + b[j*ldb+iv] = x[3] + // Update the right-hand side. + bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv-1:], ldb) + bi.Daxpy(j-1, -x[1], t[j-1:], ldt, b[iv:], ldb) + bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv-1:], ldb) + bi.Daxpy(j-1, -x[3], t[j:], ldt, b[iv:], ldb) + j -= 2 + } + } + + // Copy the vector x or Q*x to VR and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VR and normalize. + bi.Dcopy(ki+1, b[iv-1:], ldb, vr[is-1:], ldvr) + bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) + emax := 0.0 + for k := 0; k <= ki; k++ { + emax = math.Max(emax, math.Abs(vr[k*ldvr+is-1])+math.Abs(vr[k*ldvr+is])) + } + remax := 1 / emax + bi.Dscal(ki+1, remax, vr[is-1:], ldvr) + bi.Dscal(ki+1, remax, vr[is:], ldvr) + for k := ki + 1; k < n; k++ { + vr[k*ldvr+is-1] = 0 + vr[k*ldvr+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. 
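+				// Back-transform the real and imaginary parts separately:
+				// columns ki-1 and ki of VR receive Q times the two parts
+				// of the complex eigenvector.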
+ if ki-1 > 0 { + bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv-1:], ldb, + b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) + bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv:], ldb, + b[ki*ldb+iv], vr[ki:], ldvr) + } else { + bi.Dscal(n, b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) + bi.Dscal(n, b[ki*ldb+iv], vr[ki:], ldvr) + } + emax := 0.0 + for k := 0; k < n; k++ { + emax = math.Max(emax, math.Abs(vr[k*ldvr+ki-1])+math.Abs(vr[k*ldvr+ki])) + } + remax := 1 / emax + bi.Dscal(n, remax, vr[ki-1:], ldvr) + bi.Dscal(n, remax, vr[ki:], ldvr) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out below vector. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv-1] = 0 + b[k*ldb+iv] = 0 + } + iscomplex[iv-1] = -ip + iscomplex[iv] = ip + iv-- + // Back-transform and normalization is done below. + } + } + if nb > 1 { + // Blocked version of back-transform. + + // For complex case, ki2 includes both vectors (ki-1 and ki). + ki2 := ki + if ip != 0 { + ki2-- + } + // Columns iv:nb of b are valid vectors. + // When the number of vectors stored reaches nb-1 or nb, + // or if this was last vector, do the Gemm. + if iv < 2 || ki2 == 0 { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, nb-iv, ki2+nb-iv, + 1, vr, ldvr, b[iv:], ldb, + 0, b[nb+iv:], ldb) + // Normalize vectors. + var remax float64 + for k := iv; k < nb; k++ { + if iscomplex[k] == 0 { + // Real eigenvector. + ii := bi.Idamax(n, b[nb+k:], ldb) + remax = 1 / math.Abs(b[ii*ldb+nb+k]) + } else if iscomplex[k] == 1 { + // First eigenvector of conjugate pair. + emax := 0.0 + for ii := 0; ii < n; ii++ { + emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) + } + remax = 1 / emax + // Second eigenvector of conjugate pair + // will reuse this value of remax. + } + bi.Dscal(n, remax, b[nb+k:], ldb) + } + impl.Dlacpy(blas.All, n, nb-iv, b[nb+iv:], ldb, vr[ki2:], ldvr) + iv = nb - 1 + } else { + iv-- + } + } + is-- + if ip != 0 { + is-- + } + } + + if side == lapack.EVRight { + return m + } + +leftev: + // Compute left eigenvectors. + + // For complex left vector, iv is for real part and iv+1 for complex + // part. Non-blocked version always uses iv=0. Blocked version starts + // with iv=0, goes up to nb-2 or nb-1. + iv = 0 + ip = 0 + is = 0 + for ki := 0; ki < n; ki++ { + if ip == 1 { + // Previous iteration ki-1 was first of conjugate pair, + // so this ki is second of conjugate pair. + ip = -1 + continue + } + + if ki == n-1 || t[(ki+1)*ldt+ki] == 0 { + // Last column or zero on sub-diagonal, so this ki must + // be real eigenvalue. + ip = 0 + } else { + // Non-zero on sub-diagonal, so this ki is first of + // conjugate pair. + ip = 1 + } + if howmny == lapack.EVSelected && !selected[ki] { + continue + } + + // Compute the ki-th eigenvalue (wr,wi). + wr := t[ki*ldt+ki] + var wi float64 + if ip != 0 { + wi = math.Sqrt(math.Abs(t[ki*ldt+ki+1])) * math.Sqrt(math.Abs(t[(ki+1)*ldt+ki])) + } + smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) + + if ip == 0 { + // Real left eigenvector. + + b[ki*ldb+iv] = 1 + // Form right-hand side. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv] = -t[ki*ldt+k] + } + // Solve transposed quasi-triangular system: + // [ T[ki+1:n,ki+1:n] - wr ]^T * X = scale*b + vmax := 1.0 + vcrit := bignum + for j := ki + 1; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // 1×1 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side. 
+ if norms[j] > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) + // Solve [ T[j,j] - wr ]^T * X = b. + scale, _, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + } + b[j*ldb+iv] = x[0] + vmax = math.Max(math.Abs(b[j*ldb+iv]), vmax) + vcrit = bignum / vmax + j++ + } else { + // 2×2 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side. + beta := math.Max(norms[j], norms[j+1]) + if beta > vcrit { + bi.Dscal(n-ki+1, 1/vmax, b[ki*ldb+iv:], 1) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) + b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j+1:], ldt, b[(ki+1)*ldb+iv:], ldb) + // Solve + // [ T[j,j]-wr T[j,j+1] ]^T * X = scale*[ b1 ] + // [ T[j+1,j] T[j+1,j+1]-wr ] [ b2 ] + scale, _, _ := impl.Dlaln2(true, 2, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:3], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + } + b[j*ldb+iv] = x[0] + b[(j+1)*ldb+iv] = x[2] + vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[(j+1)*ldb+iv]))) + vcrit = bignum / vmax + j += 2 + } + } + // Copy the vector x or Q*x to VL and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VL and normalize. + bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) + ii := bi.Idamax(n-ki, vl[ki*ldvl+is:], ldvl) + ki + remax := 1 / math.Abs(vl[ii*ldvl+is]) + bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) + for k := 0; k < ki; k++ { + vl[k*ldvl+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with Gemv, Q*x. + if n-ki-1 > 0 { + bi.Dgemv(blas.NoTrans, n, n-ki-1, + 1, vl[ki+1:], ldvl, b[(ki+1)*ldb+iv:], ldb, + b[ki*ldb+iv], vl[ki:], ldvl) + } + ii := bi.Idamax(n, vl[ki:], ldvl) + remax := 1 / math.Abs(vl[ii*ldvl+ki]) + bi.Dscal(n, remax, vl[ki:], ldvl) + default: + // Version 2: back-transform block of vectors with Gemm + // zero out above vector. + for k := 0; k < ki; k++ { + b[k*ldb+iv] = 0 + } + iscomplex[iv] = ip + // Back-transform and normalization is done below. + } + } else { + // Complex left eigenvector. + + // Initial solve: + // [ [ T[ki,ki] T[ki,ki+1] ]^T - (wr - i* wi) ]*X = 0. + // [ [ T[ki+1,ki] T[ki+1,ki+1] ] ] + if math.Abs(t[ki*ldt+ki+1]) >= math.Abs(t[(ki+1)*ldt+ki]) { + b[ki*ldb+iv] = wi / t[ki*ldt+ki+1] + b[(ki+1)*ldb+iv+1] = 1 + } else { + b[ki*ldb+iv] = 1 + b[(ki+1)*ldb+iv+1] = -wi / t[(ki+1)*ldt+ki] + } + b[(ki+1)*ldb+iv] = 0 + b[ki*ldb+iv+1] = 0 + // Form right-hand side. + for k := ki + 2; k < n; k++ { + b[k*ldb+iv] = -b[ki*ldb+iv] * t[ki*ldt+k] + b[k*ldb+iv+1] = -b[(ki+1)*ldb+iv+1] * t[(ki+1)*ldt+k] + } + // Solve transposed quasi-triangular system: + // [ T[ki+2:n,ki+2:n]^T - (wr-i*wi) ]*X = b1+i*b2 + vmax := 1.0 + vcrit := bignum + for j := ki + 2; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // 1×1 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side elements. 
+ if norms[j] > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + // Solve [ T[j,j]-(wr-i*wi) ]*(X11+i*X12) = b1+i*b2. + scale, _, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:2], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) + } + b[j*ldb+iv] = x[0] + b[j*ldb+iv+1] = x[1] + vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[j*ldb+iv+1]))) + vcrit = bignum / vmax + j++ + } else { + // 2×2 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side elements. + if math.Max(norms[j], norms[j+1]) > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[(j+1)*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + // Solve 2×2 complex linear equation + // [ [T[j,j] T[j,j+1] ]^T - (wr-i*wi)*I ]*X = scale*b + // [ [T[j+1,j] T[j+1,j+1]] ] + scale, _, _ := impl.Dlaln2(true, 2, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) + } + b[j*ldb+iv] = x[0] + b[j*ldb+iv+1] = x[1] + b[(j+1)*ldb+iv] = x[2] + b[(j+1)*ldb+iv+1] = x[3] + vmax01 := math.Max(math.Abs(x[0]), math.Abs(x[1])) + vmax23 := math.Max(math.Abs(x[2]), math.Abs(x[3])) + vmax = math.Max(vmax, math.Max(vmax01, vmax23)) + vcrit = bignum / vmax + j += 2 + } + } + // Copy the vector x or Q*x to VL and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VL and normalize. + bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) + bi.Dcopy(n-ki, b[ki*ldb+iv+1:], ldb, vl[ki*ldvl+is+1:], ldvl) + emax := 0.0 + for k := ki; k < n; k++ { + emax = math.Max(emax, math.Abs(vl[k*ldvl+is])+math.Abs(vl[k*ldvl+is+1])) + } + remax := 1 / emax + bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) + bi.Dscal(n-ki, remax, vl[ki*ldvl+is+1:], ldvl) + for k := 0; k < ki; k++ { + vl[k*ldvl+is] = 0 + vl[k*ldvl+is+1] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. + if n-ki-2 > 0 { + bi.Dgemv(blas.NoTrans, n, n-ki-2, + 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv:], ldb, + b[ki*ldb+iv], vl[ki:], ldvl) + bi.Dgemv(blas.NoTrans, n, n-ki-2, + 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv+1:], ldb, + b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) + } else { + bi.Dscal(n, b[ki*ldb+iv], vl[ki:], ldvl) + bi.Dscal(n, b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) + } + emax := 0.0 + for k := 0; k < n; k++ { + emax = math.Max(emax, math.Abs(vl[k*ldvl+ki])+math.Abs(vl[k*ldvl+ki+1])) + } + remax := 1 / emax + bi.Dscal(n, remax, vl[ki:], ldvl) + bi.Dscal(n, remax, vl[ki+1:], ldvl) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out above vector. + // Could go from ki-nv+1 to ki-1. 
+ for k := 0; k < ki; k++ { + b[k*ldb+iv] = 0 + b[k*ldb+iv+1] = 0 + } + iscomplex[iv] = ip + iscomplex[iv+1] = -ip + iv++ + // Back-transform and normalization is done below. + } + } + if nb > 1 { + // Blocked version of back-transform. + // For complex case, ki2 includes both vectors ki and ki+1. + ki2 := ki + if ip != 0 { + ki2++ + } + // Columns [0:iv] of work are valid vectors. When the + // number of vectors stored reaches nb-1 or nb, or if + // this was last vector, do the Gemm. + if iv >= nb-2 || ki2 == n-1 { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, iv+1, n-ki2+iv, + 1, vl[ki2-iv:], ldvl, b[(ki2-iv)*ldb:], ldb, + 0, b[nb:], ldb) + // Normalize vectors. + var remax float64 + for k := 0; k <= iv; k++ { + if iscomplex[k] == 0 { + // Real eigenvector. + ii := bi.Idamax(n, b[nb+k:], ldb) + remax = 1 / math.Abs(b[ii*ldb+nb+k]) + } else if iscomplex[k] == 1 { + // First eigenvector of conjugate pair. + emax := 0.0 + for ii := 0; ii < n; ii++ { + emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) + } + remax = 1 / emax + // Second eigenvector of conjugate pair + // will reuse this value of remax. + } + bi.Dscal(n, remax, b[nb+k:], ldb) + } + impl.Dlacpy(blas.All, n, iv+1, b[nb:], ldb, vl[ki2-iv:], ldvl) + iv = 0 + } else { + iv++ + } + } + is++ + if ip != 0 { + is++ + } + } + + return m +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go new file mode 100644 index 00000000..9f3f90ba --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go @@ -0,0 +1,230 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/lapack" + +// Dtrexc reorders the real Schur factorization of a n×n real matrix +// A = Q*T*Q^T +// so that the diagonal block of T with row index ifst is moved to row ilst. +// +// On entry, T must be in Schur canonical form, that is, block upper triangular +// with 1×1 and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal +// elements equal and its off-diagonal elements of opposite sign. +// +// On return, T will be reordered by an orthogonal similarity transformation Z +// as Z^T*T*Z, and will be again in Schur canonical form. +// +// If compq is lapack.UpdateSchur, on return the matrix Q of Schur vectors will be +// updated by post-multiplying it with Z. +// If compq is lapack.UpdateSchurNone, the matrix Q is not referenced and will not be +// updated. +// For other values of compq Dtrexc will panic. +// +// ifst and ilst specify the reordering of the diagonal blocks of T. The block +// with row index ifst is moved to row ilst, by a sequence of transpositions +// between adjacent blocks. +// +// If ifst points to the second row of a 2×2 block, ifstOut will point to the +// first row, otherwise it will be equal to ifst. +// +// ilstOut will point to the first row of the block in its final position. If ok +// is true, ilstOut may differ from ilst by +1 or -1. +// +// It must hold that +// 0 <= ifst < n, and 0 <= ilst < n, +// otherwise Dtrexc will panic. +// +// If ok is false, two adjacent blocks were too close to swap because the +// problem is very ill-conditioned. T may have been partially reordered, and +// ilstOut will point to the first row of the block at the position to which it +// has been moved. +// +// work must have length at least n, otherwise Dtrexc will panic. +// +// Dtrexc is an internal routine. 
It is exported for testing purposes. +func (impl Implementation) Dtrexc(compq lapack.UpdateSchurComp, n int, t []float64, ldt int, q []float64, ldq int, ifst, ilst int, work []float64) (ifstOut, ilstOut int, ok bool) { + switch { + case compq != lapack.UpdateSchur && compq != lapack.UpdateSchurNone: + panic(badUpdateSchurComp) + case n < 0: + panic(nLT0) + case ldt < max(1, n): + panic(badLdT) + case ldq < 1, compq == lapack.UpdateSchur && ldq < n: + panic(badLdQ) + case (ifst < 0 || n <= ifst) && n > 0: + panic(badIfst) + case (ilst < 0 || n <= ilst) && n > 0: + panic(badIlst) + } + + // Quick return if possible. + if n == 0 { + return ifst, ilst, true + } + + switch { + case len(t) < (n-1)*ldt+n: + panic(shortT) + case compq == lapack.UpdateSchur && len(q) < (n-1)*ldq+n: + panic(shortQ) + case len(work) < n: + panic(shortWork) + } + + // Quick return if possible. + if n == 1 { + return ifst, ilst, true + } + + // Determine the first row of specified block + // and find out it is 1×1 or 2×2. + if ifst > 0 && t[ifst*ldt+ifst-1] != 0 { + ifst-- + } + nbf := 1 // Size of the first block. + if ifst+1 < n && t[(ifst+1)*ldt+ifst] != 0 { + nbf = 2 + } + // Determine the first row of the final block + // and find out it is 1×1 or 2×2. + if ilst > 0 && t[ilst*ldt+ilst-1] != 0 { + ilst-- + } + nbl := 1 // Size of the last block. + if ilst+1 < n && t[(ilst+1)*ldt+ilst] != 0 { + nbl = 2 + } + + ok = true + wantq := compq == lapack.UpdateSchur + + switch { + case ifst == ilst: + return ifst, ilst, true + + case ifst < ilst: + // Update ilst. + switch { + case nbf == 2 && nbl == 1: + ilst-- + case nbf == 1 && nbl == 2: + ilst++ + } + here := ifst + for here < ilst { + // Swap block with next one below. + if nbf == 1 || nbf == 2 { + // Current block either 1×1 or 2×2. + nbnext := 1 // Size of the next block. + if here+nbf+1 < n && t[(here+nbf+1)*ldt+here+nbf] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbf, nbnext, work) + if !ok { + return ifst, here, false + } + here += nbnext + // Test if 2×2 block breaks into two 1×1 blocks. + if nbf == 2 && t[(here+1)*ldt+here] == 0 { + nbf = 3 + } + continue + } + + // Current block consists of two 1×1 blocks each of + // which must be swapped individually. + nbnext := 1 // Size of the next block. + if here+3 < n && t[(here+3)*ldt+here+2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, nbnext, work) + if !ok { + return ifst, here, false + } + if nbnext == 1 { + // Swap two 1×1 blocks, no problems possible. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) + here++ + continue + } + // Recompute nbnext in case 2×2 split. + if t[(here+2)*ldt+here+1] == 0 { + nbnext = 1 + } + if nbnext == 2 { + // 2×2 block did not split. + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) + if !ok { + return ifst, here, false + } + } else { + // 2×2 block did split. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, 1, work) + } + here += 2 + } + return ifst, here, true + + default: // ifst > ilst + here := ifst + for here > ilst { + // Swap block with next one above. + if nbf == 1 || nbf == 2 { + // Current block either 1×1 or 2×2. + nbnext := 1 + if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, nbf, work) + if !ok { + return ifst, here, false + } + here -= nbnext + // Test if 2×2 block breaks into two 1×1 blocks. 
+ if nbf == 2 && t[(here+1)*ldt+here] == 0 { + nbf = 3 + } + continue + } + + // Current block consists of two 1×1 blocks each of + // which must be swapped individually. + nbnext := 1 + if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, 1, work) + if !ok { + return ifst, here, false + } + if nbnext == 1 { + // Swap two 1×1 blocks, no problems possible. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbnext, 1, work) + here-- + continue + } + // Recompute nbnext in case 2×2 split. + if t[here*ldt+here-1] == 0 { + nbnext = 1 + } + if nbnext == 2 { + // 2×2 block did not split. + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 2, 1, work) + if !ok { + return ifst, here, false + } + } else { + // 2×2 block did split. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 1, 1, work) + } + here -= 2 + } + return ifst, here, true + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go new file mode 100644 index 00000000..efc24b65 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go @@ -0,0 +1,69 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrti2 computes the inverse of a triangular matrix, storing the result in place +// into a. This is the BLAS level 2 version of the algorithm. +// +// Dtrti2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dtrti2(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + + nonUnit := diag == blas.NonUnit + // TODO(btracey): Replace this with a row-major ordering. + if uplo == blas.Upper { + for j := 0; j < n; j++ { + var ajj float64 + if nonUnit { + ajj = 1 / a[j*lda+j] + a[j*lda+j] = ajj + ajj *= -1 + } else { + ajj = -1 + } + bi.Dtrmv(blas.Upper, blas.NoTrans, diag, j, a, lda, a[j:], lda) + bi.Dscal(j, ajj, a[j:], lda) + } + return + } + for j := n - 1; j >= 0; j-- { + var ajj float64 + if nonUnit { + ajj = 1 / a[j*lda+j] + a[j*lda+j] = ajj + ajj *= -1 + } else { + ajj = -1 + } + if j < n-1 { + bi.Dtrmv(blas.Lower, blas.NoTrans, diag, n-j-1, a[(j+1)*lda+j+1:], lda, a[(j+1)*lda+j:], lda) + bi.Dscal(n-j-1, ajj, a[(j+1)*lda+j:], lda) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go new file mode 100644 index 00000000..6ec3663c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go @@ -0,0 +1,72 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrtri computes the inverse of a triangular matrix, storing the result in place +// into a. 
This is the BLAS level 3 version of the algorithm which builds upon +// Dtrti2 to operate on matrix blocks instead of only individual columns. +// +// Dtrtri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +func (impl Implementation) Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + if diag == blas.NonUnit { + for i := 0; i < n; i++ { + if a[i*lda+i] == 0 { + return false + } + } + } + + bi := blas64.Implementation() + + nb := impl.Ilaenv(1, "DTRTRI", "UD", n, -1, -1, -1) + if nb <= 1 || nb > n { + impl.Dtrti2(uplo, diag, n, a, lda) + return true + } + if uplo == blas.Upper { + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dtrmm(blas.Left, blas.Upper, blas.NoTrans, diag, j, jb, 1, a, lda, a[j:], lda) + bi.Dtrsm(blas.Right, blas.Upper, blas.NoTrans, diag, j, jb, -1, a[j*lda+j:], lda, a[j:], lda) + impl.Dtrti2(blas.Upper, diag, jb, a[j*lda+j:], lda) + } + return true + } + nn := ((n - 1) / nb) * nb + for j := nn; j >= 0; j -= nb { + jb := min(nb, n-j) + if j+jb <= n-1 { + bi.Dtrmm(blas.Left, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, 1, a[(j+jb)*lda+j+jb:], lda, a[(j+jb)*lda+j:], lda) + bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, -1, a[j*lda+j:], lda, a[(j+jb)*lda+j:], lda) + } + impl.Dtrti2(blas.Lower, diag, jb, a[j*lda+j:], lda) + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go new file mode 100644 index 00000000..1752dc5c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go @@ -0,0 +1,55 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrtrs solves a triangular system of the form A * X = B or A^T * X = B. Dtrtrs +// returns whether the solve completed successfully. If A is singular, no solve is performed. +func (impl Implementation) Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + } + + if n == 0 { + return true + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(b) < (n-1)*ldb+nrhs: + panic(shortB) + } + + // Check for singularity. 
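+	// A triangular matrix with diag == blas.Unit has an implicit unit
+	// diagonal and cannot be singular, so only the non-unit case is checked.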
+ nounit := diag == blas.NonUnit + if nounit { + for i := 0; i < n; i++ { + if a[i*lda+i] == 0 { + return false + } + } + } + bi := blas64.Implementation() + bi.Dtrsm(blas.Left, uplo, trans, diag, n, nrhs, 1, a, lda, b, ldb) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go b/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go new file mode 100644 index 00000000..3c0cb68e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go @@ -0,0 +1,174 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// This list is duplicated in netlib/lapack/netlib. Keep in sync. +const ( + // Panic strings for bad enumeration values. + badApplyOrtho = "lapack: bad ApplyOrtho" + badBalanceJob = "lapack: bad BalanceJob" + badDiag = "lapack: bad Diag" + badDirect = "lapack: bad Direct" + badEVComp = "lapack: bad EVComp" + badEVHowMany = "lapack: bad EVHowMany" + badEVJob = "lapack: bad EVJob" + badEVSide = "lapack: bad EVSide" + badGSVDJob = "lapack: bad GSVDJob" + badGenOrtho = "lapack: bad GenOrtho" + badLeftEVJob = "lapack: bad LeftEVJob" + badMatrixType = "lapack: bad MatrixType" + badNorm = "lapack: bad Norm" + badPivot = "lapack: bad Pivot" + badRightEVJob = "lapack: bad RightEVJob" + badSVDJob = "lapack: bad SVDJob" + badSchurComp = "lapack: bad SchurComp" + badSchurJob = "lapack: bad SchurJob" + badSide = "lapack: bad Side" + badSort = "lapack: bad Sort" + badStoreV = "lapack: bad StoreV" + badTrans = "lapack: bad Trans" + badUpdateSchurComp = "lapack: bad UpdateSchurComp" + badUplo = "lapack: bad Uplo" + bothSVDOver = "lapack: both jobU and jobVT are lapack.SVDOverwrite" + + // Panic strings for bad numerical and string values. 
+ badIfst = "lapack: ifst out of range" + badIhi = "lapack: ihi out of range" + badIhiz = "lapack: ihiz out of range" + badIlo = "lapack: ilo out of range" + badIloz = "lapack: iloz out of range" + badIlst = "lapack: ilst out of range" + badIsave = "lapack: bad isave value" + badIspec = "lapack: bad ispec value" + badJ1 = "lapack: j1 out of range" + badJpvt = "lapack: bad element of jpvt" + badK1 = "lapack: k1 out of range" + badK2 = "lapack: k2 out of range" + badKacc22 = "lapack: invalid value of kacc22" + badKbot = "lapack: kbot out of range" + badKtop = "lapack: ktop out of range" + badLWork = "lapack: insufficient declared workspace length" + badMm = "lapack: mm out of range" + badN1 = "lapack: bad value of n1" + badN2 = "lapack: bad value of n2" + badNa = "lapack: bad value of na" + badName = "lapack: bad name" + badNh = "lapack: bad value of nh" + badNw = "lapack: bad value of nw" + badPp = "lapack: bad value of pp" + badShifts = "lapack: bad shifts" + i0LT0 = "lapack: i0 < 0" + kGTM = "lapack: k > m" + kGTN = "lapack: k > n" + kLT0 = "lapack: k < 0" + kLT1 = "lapack: k < 1" + kdLT0 = "lapack: kd < 0" + mGTN = "lapack: m > n" + mLT0 = "lapack: m < 0" + mmLT0 = "lapack: mm < 0" + n0LT0 = "lapack: n0 < 0" + nGTM = "lapack: n > m" + nLT0 = "lapack: n < 0" + nLT1 = "lapack: n < 1" + nLTM = "lapack: n < m" + nanCFrom = "lapack: cfrom is NaN" + nanCTo = "lapack: cto is NaN" + nbGTM = "lapack: nb > m" + nbGTN = "lapack: nb > n" + nbLT0 = "lapack: nb < 0" + nccLT0 = "lapack: ncc < 0" + ncvtLT0 = "lapack: ncvt < 0" + negANorm = "lapack: anorm < 0" + negZ = "lapack: negative z value" + nhLT0 = "lapack: nh < 0" + notIsolated = "lapack: block is not isolated" + nrhsLT0 = "lapack: nrhs < 0" + nruLT0 = "lapack: nru < 0" + nshftsLT0 = "lapack: nshfts < 0" + nshftsOdd = "lapack: nshfts must be even" + nvLT0 = "lapack: nv < 0" + offsetGTM = "lapack: offset > m" + offsetLT0 = "lapack: offset < 0" + pLT0 = "lapack: p < 0" + recurLT0 = "lapack: recur < 0" + zeroCFrom = "lapack: zero cfrom" + + // Panic strings for bad slice lengths. + badLenAlpha = "lapack: bad length of alpha" + badLenBeta = "lapack: bad length of beta" + badLenIpiv = "lapack: bad length of ipiv" + badLenJpvt = "lapack: bad length of jpvt" + badLenK = "lapack: bad length of k" + badLenSelected = "lapack: bad length of selected" + badLenSi = "lapack: bad length of si" + badLenSr = "lapack: bad length of sr" + badLenTau = "lapack: bad length of tau" + badLenWi = "lapack: bad length of wi" + badLenWr = "lapack: bad length of wr" + + // Panic strings for insufficient slice lengths. 
+ shortA = "lapack: insufficient length of a" + shortAB = "lapack: insufficient length of ab" + shortAuxv = "lapack: insufficient length of auxv" + shortB = "lapack: insufficient length of b" + shortC = "lapack: insufficient length of c" + shortCNorm = "lapack: insufficient length of cnorm" + shortD = "lapack: insufficient length of d" + shortE = "lapack: insufficient length of e" + shortF = "lapack: insufficient length of f" + shortH = "lapack: insufficient length of h" + shortIWork = "lapack: insufficient length of iwork" + shortIsgn = "lapack: insufficient length of isgn" + shortQ = "lapack: insufficient length of q" + shortS = "lapack: insufficient length of s" + shortScale = "lapack: insufficient length of scale" + shortT = "lapack: insufficient length of t" + shortTau = "lapack: insufficient length of tau" + shortTauP = "lapack: insufficient length of tauP" + shortTauQ = "lapack: insufficient length of tauQ" + shortU = "lapack: insufficient length of u" + shortV = "lapack: insufficient length of v" + shortVL = "lapack: insufficient length of vl" + shortVR = "lapack: insufficient length of vr" + shortVT = "lapack: insufficient length of vt" + shortVn1 = "lapack: insufficient length of vn1" + shortVn2 = "lapack: insufficient length of vn2" + shortW = "lapack: insufficient length of w" + shortWH = "lapack: insufficient length of wh" + shortWV = "lapack: insufficient length of wv" + shortWi = "lapack: insufficient length of wi" + shortWork = "lapack: insufficient length of work" + shortWr = "lapack: insufficient length of wr" + shortX = "lapack: insufficient length of x" + shortY = "lapack: insufficient length of y" + shortZ = "lapack: insufficient length of z" + + // Panic strings for bad leading dimensions of matrices. + badLdA = "lapack: bad leading dimension of A" + badLdB = "lapack: bad leading dimension of B" + badLdC = "lapack: bad leading dimension of C" + badLdF = "lapack: bad leading dimension of F" + badLdH = "lapack: bad leading dimension of H" + badLdQ = "lapack: bad leading dimension of Q" + badLdT = "lapack: bad leading dimension of T" + badLdU = "lapack: bad leading dimension of U" + badLdV = "lapack: bad leading dimension of V" + badLdVL = "lapack: bad leading dimension of VL" + badLdVR = "lapack: bad leading dimension of VR" + badLdVT = "lapack: bad leading dimension of VT" + badLdW = "lapack: bad leading dimension of W" + badLdWH = "lapack: bad leading dimension of WH" + badLdWV = "lapack: bad leading dimension of WV" + badLdWork = "lapack: bad leading dimension of Work" + badLdX = "lapack: bad leading dimension of X" + badLdY = "lapack: bad leading dimension of Y" + badLdZ = "lapack: bad leading dimension of Z" + + // Panic strings for bad vector increments. + absIncNotOne = "lapack: increment not one or negative one" + badIncX = "lapack: incX <= 0" + badIncY = "lapack: incY <= 0" + zeroIncV = "lapack: incv == 0" +) diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go new file mode 100644 index 00000000..b251d726 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go @@ -0,0 +1,45 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Iladlc scans a matrix for its last non-zero column. Returns -1 if the matrix +// is all zeros. +// +// Iladlc is an internal routine. It is exported for testing purposes. 
+func (Implementation) Iladlc(m, n int, a []float64, lda int) int { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 || m == 0 { + return -1 + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + // Test common case where corner is non-zero. + if a[n-1] != 0 || a[(m-1)*lda+(n-1)] != 0 { + return n - 1 + } + + // Scan each row tracking the highest column seen. + highest := -1 + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + if a[i*lda+j] != 0 { + highest = max(highest, j) + break + } + } + } + return highest +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go new file mode 100644 index 00000000..b73fe18e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go @@ -0,0 +1,41 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Iladlr scans a matrix for its last non-zero row. Returns -1 if the matrix +// is all zeros. +// +// Iladlr is an internal routine. It is exported for testing purposes. +func (Implementation) Iladlr(m, n int, a []float64, lda int) int { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 || m == 0 { + return -1 + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + // Check the common case where the corner is non-zero + if a[(m-1)*lda] != 0 || a[(m-1)*lda+n-1] != 0 { + return m - 1 + } + for i := m - 1; i >= 0; i-- { + for j := 0; j < n; j++ { + if a[i*lda+j] != 0 { + return i + } + } + } + return -1 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go b/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go new file mode 100644 index 00000000..c134d21b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go @@ -0,0 +1,387 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Ilaenv returns algorithm tuning parameters for the algorithm given by the +// input string. ispec specifies the parameter to return: +// 1: The optimal block size for a blocked algorithm. +// 2: The minimum block size for a blocked algorithm. +// 3: The block size of unprocessed data at which a blocked algorithm should +// crossover to an unblocked version. +// 4: The number of shifts. +// 5: The minimum column dimension for blocking to be used. +// 6: The crossover point for SVD (to use QR factorization or not). +// 7: The number of processors. +// 8: The crossover point for multi-shift in QR and QZ methods for non-symmetric eigenvalue problems. +// 9: Maximum size of the subproblems in divide-and-conquer algorithms. +// 10: ieee NaN arithmetic can be trusted not to trap. +// 11: infinity arithmetic can be trusted not to trap. +// 12...16: parameters for Dhseqr and related functions. See Iparmq for more +// information. +// +// Ilaenv is an internal routine. It is exported for testing purposes. +func (impl Implementation) Ilaenv(ispec int, name string, opts string, n1, n2, n3, n4 int) int { + // TODO(btracey): Replace this with a constant lookup? A list of constants? 
+ sname := name[0] == 'S' || name[0] == 'D' + cname := name[0] == 'C' || name[0] == 'Z' + if !sname && !cname { + panic(badName) + } + c2 := name[1:3] + c3 := name[3:6] + c4 := c3[1:3] + + switch ispec { + default: + panic(badIspec) + case 1: + switch c2 { + default: + panic(badName) + case "GE": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 64 + } + return 64 + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 32 + } + return 32 + case "HRD": + if sname { + return 32 + } + return 32 + case "BRD": + if sname { + return 32 + } + return 32 + case "TRI": + if sname { + return 64 + } + return 64 + } + case "PO": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 64 + } + return 64 + } + case "SY": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 64 + } + return 64 + case "TRD": + return 32 + case "GST": + return 64 + } + case "HE": + switch c3 { + default: + panic(badName) + case "TRF": + return 64 + case "TRD": + return 32 + case "GST": + return 64 + } + case "OR": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + case 'M': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + } + case "UN": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + case 'M': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + } + case "GB": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + if n4 <= 64 { + return 1 + } + return 32 + } + if n4 <= 64 { + return 1 + } + return 32 + } + case "PB": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + if n4 <= 64 { + return 1 + } + return 32 + } + if n4 <= 64 { + return 1 + } + return 32 + } + case "TR": + switch c3 { + default: + panic(badName) + case "TRI": + if sname { + return 64 + } + return 64 + case "EVC": + if sname { + return 64 + } + return 64 + } + case "LA": + switch c3 { + default: + panic(badName) + case "UUM": + if sname { + return 64 + } + return 64 + } + case "ST": + if sname && c3 == "EBZ" { + return 1 + } + panic(badName) + } + case 2: + switch c2 { + default: + panic(badName) + case "GE": + switch c3 { + default: + panic(badName) + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 2 + } + return 2 + case "HRD": + if sname { + return 2 + } + return 2 + case "BRD": + if sname { + return 2 + } + return 2 + case "TRI": + if sname { + return 2 + } + return 2 + } + case "SY": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 8 + } + return 8 + case "TRD": + if sname { + return 2 + } + panic(badName) + } + case "HE": + if c3 == "TRD" { + return 2 + } + panic(badName) + case "OR": + if !sname { + panic(badName) + } + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + case 'M': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + } + case "UN": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + case 'M': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", 
"QL", "HR", "TR", "BR": + return 2 + } + } + } + case 3: + switch c2 { + default: + panic(badName) + case "GE": + switch c3 { + default: + panic(badName) + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 128 + } + return 128 + case "HRD": + if sname { + return 128 + } + return 128 + case "BRD": + if sname { + return 128 + } + return 128 + } + case "SY": + if sname && c3 == "TRD" { + return 32 + } + panic(badName) + case "HE": + if c3 == "TRD" { + return 32 + } + panic(badName) + case "OR": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 128 + } + } + case "UN": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 128 + } + } + } + case 4: + // Used by xHSEQR + return 6 + case 5: + // Not used + return 2 + case 6: + // Used by xGELSS and xGESVD + return int(float64(min(n1, n2)) * 1.6) + case 7: + // Not used + return 1 + case 8: + // Used by xHSEQR + return 50 + case 9: + // used by xGELSD and xGESDD + return 25 + case 10: + // Go guarantees ieee + return 1 + case 11: + // Go guarantees ieee + return 1 + case 12, 13, 14, 15, 16: + // Dhseqr and related functions for eigenvalue problems. + return impl.Iparmq(ispec, name, opts, n1, n2, n3, n4) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go new file mode 100644 index 00000000..3800f11c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go @@ -0,0 +1,115 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Iparmq returns problem and machine dependent parameters useful for Dhseqr and +// related subroutines for eigenvalue problems. +// +// ispec specifies the parameter to return: +// 12: Crossover point between Dlahqr and Dlaqr0. Will be at least 11. +// 13: Deflation window size. +// 14: Nibble crossover point. Determines when to skip a multi-shift QR sweep. +// 15: Number of simultaneous shifts in a multishift QR iteration. +// 16: Select structured matrix multiply. +// For other values of ispec Iparmq will panic. +// +// name is the name of the calling function. name must be in uppercase but this +// is not checked. +// +// opts is not used and exists for future use. +// +// n is the order of the Hessenberg matrix H. +// +// ilo and ihi specify the block [ilo:ihi+1,ilo:ihi+1] that is being processed. +// +// lwork is the amount of workspace available. +// +// Except for ispec input parameters are not checked. +// +// Iparmq is an internal routine. It is exported for testing purposes. +func (Implementation) Iparmq(ispec int, name, opts string, n, ilo, ihi, lwork int) int { + nh := ihi - ilo + 1 + ns := 2 + switch { + case nh >= 30: + ns = 4 + case nh >= 60: + ns = 10 + case nh >= 150: + ns = max(10, nh/int(math.Log(float64(nh))/math.Ln2)) + case nh >= 590: + ns = 64 + case nh >= 3000: + ns = 128 + case nh >= 6000: + ns = 256 + } + ns = max(2, ns-(ns%2)) + + switch ispec { + default: + panic(badIspec) + + case 12: + // Matrices of order smaller than nmin get sent to Dlahqr, the + // classic double shift algorithm. This must be at least 11. 
+ const nmin = 75 + return nmin + + case 13: + const knwswp = 500 + if nh <= knwswp { + return ns + } + return 3 * ns / 2 + + case 14: + // Skip a computationally expensive multi-shift QR sweep with + // Dlaqr5 whenever aggressive early deflation finds at least + // nibble*(window size)/100 deflations. The default, small, + // value reflects the expectation that the cost of looking + // through the deflation window with Dlaqr3 will be + // substantially smaller. + const nibble = 14 + return nibble + + case 15: + return ns + + case 16: + if len(name) != 6 { + panic(badName) + } + const ( + k22min = 14 + kacmin = 14 + ) + var acc22 int + switch { + case name[1:] == "GGHRD" || name[1:] == "GGHD3": + acc22 = 1 + if nh >= k22min { + acc22 = 2 + } + case name[3:] == "EXC": + if nh >= kacmin { + acc22 = 1 + } + if nh >= k22min { + acc22 = 2 + } + case name[1:] == "HSEQR" || name[1:5] == "LAQR": + if ns >= kacmin { + acc22 = 1 + } + if ns >= k22min { + acc22 = 2 + } + } + return acc22 + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go new file mode 100644 index 00000000..434da02d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go @@ -0,0 +1,51 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/lapack" + +// Implementation is the native Go implementation of LAPACK routines. It +// is built on top of calls to the return of blas64.Implementation(), so while +// this code is in pure Go, the underlying BLAS implementation may not be. +type Implementation struct{} + +var _ lapack.Float64 = Implementation{} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func abs(a int) int { + if a < 0 { + return -a + } + return a +} + +const ( + // dlamchE is the machine epsilon. For IEEE this is 2^{-53}. + dlamchE = 1.0 / (1 << 53) + + // dlamchB is the radix of the machine (the base of the number system). + dlamchB = 2 + + // dlamchP is base * eps. + dlamchP = dlamchB * dlamchE + + // dlamchS is the "safe minimum", that is, the lowest number such that + // 1/dlamchS does not overflow, or also the smallest normal number. + // For IEEE this is 2^{-1022}. + dlamchS = 1.0 / (1 << 256) / (1 << 256) / (1 << 256) / (1 << 254) +) diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack.go b/vendor/gonum.org/v1/gonum/lapack/lapack.go new file mode 100644 index 00000000..eef14c17 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/lapack.go @@ -0,0 +1,213 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lapack + +import "gonum.org/v1/gonum/blas" + +// Complex128 defines the public complex128 LAPACK API supported by gonum/lapack. +type Complex128 interface{} + +// Float64 defines the public float64 LAPACK API supported by gonum/lapack. 
+type Float64 interface { + Dgecon(norm MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 + Dgeev(jobvl LeftEVJob, jobvr RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int) + Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool + Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) + Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) + Dgesvd(jobU, jobVT SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool) + Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool) + Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool) + Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int) + Dggsvd3(jobU, jobV, jobQ GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool) + Dlantr(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64 + Dlange(norm MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 + Dlansy(norm MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64 + Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) + Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) + Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) + Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 + Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) + Dpotri(ul blas.Uplo, n int, a []float64, lda int) (ok bool) + Dpotrs(ul blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int) + Dsyev(jobz EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool) + Dtrcon(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64 + Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool) + Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) +} + +// Direct specifies the direction of the multiplication for the Householder matrix. +type Direct byte + +const ( + Forward Direct = 'F' // Reflectors are right-multiplied, H_0 * H_1 * ... * H_{k-1}. + Backward Direct = 'B' // Reflectors are left-multiplied, H_{k-1} * ... * H_1 * H_0. +) + +// Sort is the sorting order. +type Sort byte + +const ( + SortIncreasing Sort = 'I' + SortDecreasing Sort = 'D' +) + +// StoreV indicates the storage direction of elementary reflectors. +type StoreV byte + +const ( + ColumnWise StoreV = 'C' // Reflector stored in a column of the matrix. + RowWise StoreV = 'R' // Reflector stored in a row of the matrix. +) + +// MatrixNorm represents the kind of matrix norm to compute. 
+type MatrixNorm byte + +const ( + MaxAbs MatrixNorm = 'M' // max(abs(A(i,j))) + MaxColumnSum MatrixNorm = 'O' // Maximum absolute column sum (one norm) + MaxRowSum MatrixNorm = 'I' // Maximum absolute row sum (infinity norm) + Frobenius MatrixNorm = 'F' // Frobenius norm (sqrt of sum of squares) +) + +// MatrixType represents the kind of matrix represented in the data. +type MatrixType byte + +const ( + General MatrixType = 'G' // A general dense matrix. + UpperTri MatrixType = 'U' // An upper triangular matrix. + LowerTri MatrixType = 'L' // A lower triangular matrix. +) + +// Pivot specifies the pivot type for plane rotations. +type Pivot byte + +const ( + Variable Pivot = 'V' + Top Pivot = 'T' + Bottom Pivot = 'B' +) + +// ApplyOrtho specifies which orthogonal matrix is applied in Dormbr. +type ApplyOrtho byte + +const ( + ApplyP ApplyOrtho = 'P' // Apply P or P^T. + ApplyQ ApplyOrtho = 'Q' // Apply Q or Q^T. +) + +// GenOrtho specifies which orthogonal matrix is generated in Dorgbr. +type GenOrtho byte + +const ( + GeneratePT GenOrtho = 'P' // Generate P^T. + GenerateQ GenOrtho = 'Q' // Generate Q. +) + +// SVDJob specifies the singular vector computation type for SVD. +type SVDJob byte + +const ( + SVDAll SVDJob = 'A' // Compute all columns of the orthogonal matrix U or V. + SVDStore SVDJob = 'S' // Compute the singular vectors and store them in the orthogonal matrix U or V. + SVDOverwrite SVDJob = 'O' // Compute the singular vectors and overwrite them on the input matrix A. + SVDNone SVDJob = 'N' // Do not compute singular vectors. +) + +// GSVDJob specifies the singular vector computation type for Generalized SVD. +type GSVDJob byte + +const ( + GSVDU GSVDJob = 'U' // Compute orthogonal matrix U. + GSVDV GSVDJob = 'V' // Compute orthogonal matrix V. + GSVDQ GSVDJob = 'Q' // Compute orthogonal matrix Q. + GSVDUnit GSVDJob = 'I' // Use unit-initialized matrix. + GSVDNone GSVDJob = 'N' // Do not compute orthogonal matrix. +) + +// EVComp specifies how eigenvectors are computed in Dsteqr. +type EVComp byte + +const ( + EVOrig EVComp = 'V' // Compute eigenvectors of the original symmetric matrix. + EVTridiag EVComp = 'I' // Compute eigenvectors of the tridiagonal matrix. + EVCompNone EVComp = 'N' // Do not compute eigenvectors. +) + +// EVJob specifies whether eigenvectors are computed in Dsyev. +type EVJob byte + +const ( + EVCompute EVJob = 'V' // Compute eigenvectors. + EVNone EVJob = 'N' // Do not compute eigenvectors. +) + +// LeftEVJob specifies whether left eigenvectors are computed in Dgeev. +type LeftEVJob byte + +const ( + LeftEVCompute LeftEVJob = 'V' // Compute left eigenvectors. + LeftEVNone LeftEVJob = 'N' // Do not compute left eigenvectors. +) + +// RightEVJob specifies whether right eigenvectors are computed in Dgeev. +type RightEVJob byte + +const ( + RightEVCompute RightEVJob = 'V' // Compute right eigenvectors. + RightEVNone RightEVJob = 'N' // Do not compute right eigenvectors. +) + +// BalanceJob specifies matrix balancing operation. +type BalanceJob byte + +const ( + Permute BalanceJob = 'P' + Scale BalanceJob = 'S' + PermuteScale BalanceJob = 'B' + BalanceNone BalanceJob = 'N' +) + +// SchurJob specifies whether the Schur form is computed in Dhseqr. +type SchurJob byte + +const ( + EigenvaluesOnly SchurJob = 'E' + EigenvaluesAndSchur SchurJob = 'S' +) + +// SchurComp specifies whether and how the Schur vectors are computed in Dhseqr. +type SchurComp byte + +const ( + SchurOrig SchurComp = 'V' // Compute Schur vectors of the original matrix. 
+ SchurHess SchurComp = 'I' // Compute Schur vectors of the upper Hessenberg matrix. + SchurNone SchurComp = 'N' // Do not compute Schur vectors. +) + +// UpdateSchurComp specifies whether the matrix of Schur vectors is updated in Dtrexc. +type UpdateSchurComp byte + +const ( + UpdateSchur UpdateSchurComp = 'V' // Update the matrix of Schur vectors. + UpdateSchurNone UpdateSchurComp = 'N' // Do not update the matrix of Schur vectors. +) + +// EVSide specifies what eigenvectors are computed in Dtrevc3. +type EVSide byte + +const ( + EVRight EVSide = 'R' // Compute only right eigenvectors. + EVLeft EVSide = 'L' // Compute only left eigenvectors. + EVBoth EVSide = 'B' // Compute both right and left eigenvectors. +) + +// EVHowMany specifies which eigenvectors are computed in Dtrevc3 and how. +type EVHowMany byte + +const ( + EVAll EVHowMany = 'A' // Compute all right and/or left eigenvectors. + EVAllMulQ EVHowMany = 'B' // Compute all right and/or left eigenvectors multiplied by an input matrix. + EVSelected EVHowMany = 'S' // Compute selected right and/or left eigenvectors. +) diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go b/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go new file mode 100644 index 00000000..da19e3ec --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go @@ -0,0 +1,20 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lapack64 provides a set of convenient wrapper functions for LAPACK +// calls, as specified in the netlib standard (www.netlib.org). +// +// The native Go routines are used by default, and the Use function can be used +// to set an alternative implementation. +// +// If the type of matrix (General, Symmetric, etc.) is known and fixed, it is +// used in the wrapper signature. In many cases, however, the type of the matrix +// changes during the call to the routine, for example the matrix is symmetric on +// entry and is triangular on exit. In these cases the correct types should be checked +// in the documentation. +// +// The full set of Lapack functions is very large, and it is not clear that a +// full implementation is desirable, let alone feasible. Please open up an issue +// if there is a specific function you need and/or are willing to implement. +package lapack64 // import "gonum.org/v1/gonum/lapack/lapack64" diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go new file mode 100644 index 00000000..208ee1f4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go @@ -0,0 +1,581 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lapack64 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/gonum" +) + +var lapack64 lapack.Float64 = gonum.Implementation{} + +// Use sets the LAPACK float64 implementation to be used by subsequent BLAS calls. +// The default implementation is native.Implementation. +func Use(l lapack.Float64) { + lapack64 = l +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// Potrf computes the Cholesky factorization of a. 
+// The factorization has the form +// A = U^T * U if a.Uplo == blas.Upper, or +// A = L * L^T if a.Uplo == blas.Lower, +// where U is an upper triangular matrix and L is lower triangular. +// The triangular matrix is returned in t, and the underlying data between +// a and t is shared. The returned bool indicates whether a is positive +// definite and the factorization could be finished. +func Potrf(a blas64.Symmetric) (t blas64.Triangular, ok bool) { + ok = lapack64.Dpotrf(a.Uplo, a.N, a.Data, max(1, a.Stride)) + t.Uplo = a.Uplo + t.N = a.N + t.Data = a.Data + t.Stride = a.Stride + t.Diag = blas.NonUnit + return +} + +// Potri computes the inverse of a real symmetric positive definite matrix A +// using its Cholesky factorization. +// +// On entry, t contains the triangular factor U or L from the Cholesky +// factorization A = U^T*U or A = L*L^T, as computed by Potrf. +// +// On return, the upper or lower triangle of the (symmetric) inverse of A is +// stored in t, overwriting the input factor U or L, and also returned in a. The +// underlying data between a and t is shared. +// +// The returned bool indicates whether the inverse was computed successfully. +func Potri(t blas64.Triangular) (a blas64.Symmetric, ok bool) { + ok = lapack64.Dpotri(t.Uplo, t.N, t.Data, max(1, t.Stride)) + a.Uplo = t.Uplo + a.N = t.N + a.Data = t.Data + a.Stride = t.Stride + return +} + +// Potrs solves a system of n linear equations A*X = B where A is an n×n +// symmetric positive definite matrix and B is an n×nrhs matrix, using the +// Cholesky factorization A = U^T*U or A = L*L^T. t contains the corresponding +// triangular factor as returned by Potrf. On entry, B contains the right-hand +// side matrix B, on return it contains the solution matrix X. +func Potrs(t blas64.Triangular, b blas64.General) { + lapack64.Dpotrs(t.Uplo, t.N, b.Cols, t.Data, max(1, t.Stride), b.Data, max(1, b.Stride)) +} + +// Gecon estimates the reciprocal of the condition number of the n×n matrix A +// given the LU decomposition of the matrix. The condition number computed may +// be based on the 1-norm or the ∞-norm. +// +// a contains the result of the LU decomposition of A as computed by Getrf. +// +// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 4*n and Gecon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Gecon will panic otherwise. +func Gecon(norm lapack.MatrixNorm, a blas64.General, anorm float64, work []float64, iwork []int) float64 { + return lapack64.Dgecon(norm, a.Cols, a.Data, max(1, a.Stride), anorm, work, iwork) +} + +// Gels finds a minimum-norm solution based on the matrices A and B using the +// QR or LQ factorization. Gels returns false if the matrix +// A is singular, and true if this solution was successfully found. +// +// The minimization problem solved depends on the input parameters. +// +// 1. If m >= n and trans == blas.NoTrans, Gels finds X such that || A*X - B||_2 +// is minimized. +// 2. If m < n and trans == blas.NoTrans, Gels finds the minimum norm solution of +// A * X = B. +// 3. If m >= n and trans == blas.Trans, Gels finds the minimum norm solution of +// A^T * X = B. +// 4. If m < n and trans == blas.Trans, Gels finds X such that || A*X - B||_2 +// is minimized. +// Note that the least-squares solutions (cases 1 and 3) perform the minimization +// per column of B. This is not the same as finding the minimum-norm matrix. 
+// +// The matrix A is a general matrix of size m×n and is modified during this call. +// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry, +// the elements of b specify the input matrix B. B has size m×nrhs if +// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the +// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, +// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic +// otherwise. A longer work will enable blocked algorithms to be called. +// In the special case that lwork == -1, work[0] will be set to the optimal working +// length. +func Gels(trans blas.Transpose, a blas64.General, b blas64.General, work []float64, lwork int) bool { + return lapack64.Dgels(trans, a.Rows, a.Cols, b.Cols, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride), work, lwork) +} + +// Geqrf computes the QR factorization of the m×n matrix A using a blocked +// algorithm. A is modified to contain the information to construct Q and R. +// The upper triangle of a contains the matrix R. The lower triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length at least min(m,n), and +// this function will panic otherwise. +// +// The ith elementary reflector can be explicitly constructed by first extracting +// the +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// and computing H_i = I - tau[i] * v * v^T. +// +// The orthonormal matrix Q can be constucted from a product of these elementary +// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m and this function will panic otherwise. +// Geqrf is a blocked QR factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Geqrf, +// the optimal work length will be stored into work[0]. +func Geqrf(a blas64.General, tau, work []float64, lwork int) { + lapack64.Dgeqrf(a.Rows, a.Cols, a.Data, max(1, a.Stride), tau, work, lwork) +} + +// Gelqf computes the LQ factorization of the m×n matrix A using a blocked +// algorithm. A is modified to contain the information to construct L and Q. The +// lower triangle of a contains the matrix L. The elements above the diagonal +// and the slice tau represent the matrix Q. tau is modified to contain the +// reflector scales. tau must have length at least min(m,n), and this function +// will panic otherwise. +// +// See Geqrf for a description of the elementary reflectors and orthonormal +// matrix Q. Q is constructed as a product of these elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m and this function will panic otherwise. +// Gelqf is a blocked LQ factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Gelqf, +// the optimal work length will be stored into work[0]. +func Gelqf(a blas64.General, tau, work []float64, lwork int) { + lapack64.Dgelqf(a.Rows, a.Cols, a.Data, max(1, a.Stride), tau, work, lwork) +} + +// Gesvd computes the singular value decomposition of the input matrix A. 
+// +// The singular value decomposition is +// A = U * Sigma * V^T +// where Sigma is an m×n diagonal matrix containing the singular values of A, +// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first +// min(m,n) columns of U and V are the left and right singular vectors of A +// respectively. +// +// jobU and jobVT are options for computing the singular vectors. The behavior +// is as follows +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDStore The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// The behavior is the same for jobVT and the rows of V^T. At most one of jobU +// and jobVT can equal lapack.SVDOverwrite, and Gesvd will panic otherwise. +// +// On entry, a contains the data for the m×n matrix A. During the call to Gesvd +// the data is overwritten. On exit, A contains the appropriate singular vectors +// if either job is lapack.SVDOverwrite. +// +// s is a slice of length at least min(m,n) and on exit contains the singular +// values in decreasing order. +// +// u contains the left singular vectors on exit, stored columnwise. If +// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDStore u is +// of size m×min(m,n). If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is +// not used. +// +// vt contains the left singular vectors on exit, stored rowwise. If +// jobV == lapack.SVDAll, vt is of size n×m. If jobVT == lapack.SVDStore vt is +// of size min(m,n)×n. If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is +// not used. +// +// work is a slice for storing temporary memory, and lwork is the usable size of +// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)). +// If lwork == -1, instead of performing Gesvd, the optimal work length will be +// stored into work[0]. Gesvd will panic if the working memory has insufficient +// storage. +// +// Gesvd returns whether the decomposition successfully completed. +func Gesvd(jobU, jobVT lapack.SVDJob, a, u, vt blas64.General, s, work []float64, lwork int) (ok bool) { + return lapack64.Dgesvd(jobU, jobVT, a.Rows, a.Cols, a.Data, max(1, a.Stride), s, u.Data, max(1, u.Stride), vt.Data, max(1, vt.Stride), work, lwork) +} + +// Getrf computes the LU decomposition of the m×n matrix A. +// The LU decomposition is a factorization of A into +// A = P * L * U +// where P is a permutation matrix, L is a unit lower triangular matrix, and +// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored +// in place into a. +// +// ipiv is a permutation vector. It indicates that row i of the matrix was +// changed with ipiv[i]. ipiv must have length at least min(m,n), and will panic +// otherwise. ipiv is zero-indexed. +// +// Getrf is the blocked version of the algorithm. +// +// Getrf returns whether the matrix A is singular. The LU decomposition will +// be computed regardless of the singularity of A, but division by zero +// will occur if the false is returned and the result is used to solve a +// system of equations. +func Getrf(a blas64.General, ipiv []int) bool { + return lapack64.Dgetrf(a.Rows, a.Cols, a.Data, max(1, a.Stride), ipiv) +} + +// Getri computes the inverse of the matrix A using the LU factorization computed +// by Getrf. On entry, a contains the PLU decomposition of A as computed by +// Getrf and on exit contains the reciprocal of the original matrix. 
+// +// Getri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= n and this function will panic otherwise. +// Getri is a blocked inversion, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Getri, +// the optimal work length will be stored into work[0]. +func Getri(a blas64.General, ipiv []int, work []float64, lwork int) (ok bool) { + return lapack64.Dgetri(a.Cols, a.Data, max(1, a.Stride), ipiv, work, lwork) +} + +// Getrs solves a system of equations using an LU factorization. +// The system of equations solved is +// A * X = B if trans == blas.Trans +// A^T * X = B if trans == blas.NoTrans +// A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs. +// +// On entry b contains the elements of the matrix B. On exit, b contains the +// elements of X, the solution to the system of equations. +// +// a and ipiv contain the LU factorization of A and the permutation indices as +// computed by Getrf. ipiv is zero-indexed. +func Getrs(trans blas.Transpose, a blas64.General, b blas64.General, ipiv []int) { + lapack64.Dgetrs(trans, a.Cols, b.Cols, a.Data, max(1, a.Stride), ipiv, b.Data, max(1, b.Stride)) +} + +// Ggsvd3 computes the generalized singular value decomposition (GSVD) +// of an m×n matrix A and p×n matrix B: +// U^T*A*Q = D1*[ 0 R ] +// +// V^T*B*Q = D2*[ 0 R ] +// where U, V and Q are orthogonal matrices. +// +// Ggsvd3 returns k and l, the dimensions of the sub-blocks. k+l +// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. +// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and +// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following +// structures, respectively: +// +// If m-k-l >= 0, +// +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] +// +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] +// +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l +// +// where +// +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. +// +// R is stored in +// A[0:k+l, n-k-l:n] +// on exit. +// +// If m-k-l < 0, +// +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] +// +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] +// +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] +// +// where +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] +// [ 0 R22 R23 ] +// and R33 is stored in +// B[m-k:l, n+m-k-l:n] on exit. +// +// Ggsvd3 computes C, S, R, and optionally the orthogonal transformation +// matrices U, V and Q. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// alpha and beta must have length n or Ggsvd3 will panic. 
On exit, alpha and +// beta contain the generalized singular value pairs of A and B +// alpha[0:k] = 1, +// beta[0:k] = 0, +// if m-k-l >= 0, +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// if m-k-l < 0, +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// if k+l < n, +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. +// +// On exit, iwork contains the permutation required to sort alpha descending. +// +// iwork must have length n, work must have length at least max(1, lwork), and +// lwork must be -1 or greater than n, otherwise Ggsvd3 will panic. If +// lwork is -1, work[0] holds the optimal lwork on return, but Ggsvd3 does +// not perform the GSVD. +func Ggsvd3(jobU, jobV, jobQ lapack.GSVDJob, a, b blas64.General, alpha, beta []float64, u, v, q blas64.General, work []float64, lwork int, iwork []int) (k, l int, ok bool) { + return lapack64.Dggsvd3(jobU, jobV, jobQ, a.Rows, a.Cols, b.Rows, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride), alpha, beta, u.Data, max(1, u.Stride), v.Data, max(1, v.Stride), q.Data, max(1, q.Stride), work, lwork, iwork) +} + +// Lange computes the matrix norm of the general m×n matrix A. The input norm +// specifies the norm computed. +// lapack.MaxAbs: the maximum absolute value of an element. +// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. +// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. +// lapack.Frobenius: the square root of the sum of the squares of the entries. +// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. +// There are no restrictions on work for the other matrix norms. +func Lange(norm lapack.MatrixNorm, a blas64.General, work []float64) float64 { + return lapack64.Dlange(norm, a.Rows, a.Cols, a.Data, max(1, a.Stride), work) +} + +// Lansy computes the specified norm of an n×n symmetric matrix. If +// norm == lapack.MaxColumnSum or norm == lapackMaxRowSum work must have length +// at least n and this function will panic otherwise. +// There are no restrictions on work for the other matrix norms. +func Lansy(norm lapack.MatrixNorm, a blas64.Symmetric, work []float64) float64 { + return lapack64.Dlansy(norm, a.Uplo, a.N, a.Data, max(1, a.Stride), work) +} + +// Lantr computes the specified norm of an m×n trapezoidal matrix A. If +// norm == lapack.MaxColumnSum work must have length at least n and this function +// will panic otherwise. There are no restrictions on work for the other matrix norms. +func Lantr(norm lapack.MatrixNorm, a blas64.Triangular, work []float64) float64 { + return lapack64.Dlantr(norm, a.Uplo, a.Diag, a.N, a.N, a.Data, max(1, a.Stride), work) +} + +// Lapmt rearranges the columns of the m×n matrix X as specified by the +// permutation k_0, k_1, ..., k_{n-1} of the integers 0, ..., n-1. +// +// If forward is true a forward permutation is performed: +// +// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. +// +// otherwise a backward permutation is performed: +// +// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. +// +// k must have length n, otherwise Lapmt will panic. k is zero-indexed. +func Lapmt(forward bool, x blas64.General, k []int) { + lapack64.Dlapmt(forward, x.Rows, x.Cols, x.Data, max(1, x.Stride), k) +} + +// Ormlq multiplies the matrix C by the othogonal matrix Q defined by +// A and tau. A and tau are as returned from Gelqf. 
+// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, A is a matrix of side k×m, and if side == blas.Right +// A is of size k×n. This uses a blocked algorithm. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, +// and this function will panic otherwise. +// Ormlq uses a block algorithm, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Ormlq, +// the optimal work length will be stored into work[0]. +// +// Tau contains the Householder scales and must have length at least k, and +// this function will panic otherwise. +func Ormlq(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) { + lapack64.Dormlq(side, trans, c.Rows, c.Cols, a.Rows, a.Data, max(1, a.Stride), tau, c.Data, max(1, c.Stride), work, lwork) +} + +// Ormqr multiplies an m×n matrix C by an orthogonal matrix Q as +// C = Q * C, if side == blas.Left and trans == blas.NoTrans, +// C = Q^T * C, if side == blas.Left and trans == blas.Trans, +// C = C * Q, if side == blas.Right and trans == blas.NoTrans, +// C = C * Q^T, if side == blas.Right and trans == blas.Trans, +// where Q is defined as the product of k elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1}. +// +// If side == blas.Left, A is an m×k matrix and 0 <= k <= m. +// If side == blas.Right, A is an n×k matrix and 0 <= k <= n. +// The ith column of A contains the vector which defines the elementary +// reflector H_i and tau[i] contains its scalar factor. tau must have length k +// and Ormqr will panic otherwise. Geqrf returns A and tau in the required +// form. +// +// work must have length at least max(1,lwork), and lwork must be at least n if +// side == blas.Left and at least m if side == blas.Right, otherwise Ormqr will +// panic. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= m if side == blas.Left and lwork >= n if side == +// blas.Right, and this function will panic otherwise. Larger values of lwork +// will generally give better performance. On return, work[0] will contain the +// optimal value of lwork. +// +// If lwork is -1, instead of performing Ormqr, the optimal workspace size will +// be stored into work[0]. +func Ormqr(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) { + lapack64.Dormqr(side, trans, c.Rows, c.Cols, a.Cols, a.Data, max(1, a.Stride), tau, c.Data, max(1, c.Stride), work, lwork) +} + +// Pocon estimates the reciprocal of the condition number of a positive-definite +// matrix A given the Cholesky decmposition of A. The condition number computed +// is based on the 1-norm and the ∞-norm. +// +// anorm is the 1-norm and the ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 3*n and Pocon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Pocon will panic otherwise. 
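+//
+// A minimal usage sketch (variable names are illustrative only): with a
+// positive definite blas64.Symmetric a whose data has been overwritten by its
+// Cholesky factor via Potrf, and anorm computed from the original matrix with
+// Lansy,
+//  work := make([]float64, 3*a.N)
+//  iwork := make([]int, a.N)
+//  rcond := Pocon(a, anorm, work, iwork)
+// returns an estimate of the reciprocal condition number of A.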
+func Pocon(a blas64.Symmetric, anorm float64, work []float64, iwork []int) float64 { + return lapack64.Dpocon(a.Uplo, a.N, a.Data, max(1, a.Stride), anorm, work, iwork) +} + +// Syev computes all eigenvalues and, optionally, the eigenvectors of a real +// symmetric matrix A. +// +// w contains the eigenvalues in ascending order upon return. w must have length +// at least n, and Syev will panic otherwise. +// +// On entry, a contains the elements of the symmetric matrix A in the triangular +// portion specified by uplo. If jobz == lapack.EVCompute, a contains the +// orthonormal eigenvectors of A on exit, otherwise jobz must be lapack.EVNone +// and on exit the specified triangular region is overwritten. +// +// Work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 3*n-1, and Syev will panic otherwise. The amount of blocking is +// limited by the usable length. If lwork == -1, instead of computing Syev the +// optimal work length is stored into work[0]. +func Syev(jobz lapack.EVJob, a blas64.Symmetric, w, work []float64, lwork int) (ok bool) { + return lapack64.Dsyev(jobz, a.Uplo, a.N, a.Data, max(1, a.Stride), w, work, lwork) +} + +// Trcon estimates the reciprocal of the condition number of a triangular matrix A. +// The condition number computed may be based on the 1-norm or the ∞-norm. +// +// work is a temporary data slice of length at least 3*n and Trcon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Trcon will panic otherwise. +func Trcon(norm lapack.MatrixNorm, a blas64.Triangular, work []float64, iwork []int) float64 { + return lapack64.Dtrcon(norm, a.Uplo, a.Diag, a.N, a.Data, max(1, a.Stride), work, iwork) +} + +// Trtri computes the inverse of a triangular matrix, storing the result in place +// into a. +// +// Trtri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +func Trtri(a blas64.Triangular) (ok bool) { + return lapack64.Dtrtri(a.Uplo, a.Diag, a.N, a.Data, max(1, a.Stride)) +} + +// Trtrs solves a triangular system of the form A * X = B or A^T * X = B. Trtrs +// returns whether the solve completed successfully. If A is singular, no solve is performed. +func Trtrs(trans blas.Transpose, a blas64.Triangular, b blas64.General) (ok bool) { + return lapack64.Dtrtrs(a.Uplo, trans, a.Diag, a.N, b.Cols, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride)) +} + +// Geev computes the eigenvalues and, optionally, the left and/or right +// eigenvectors for an n×n real nonsymmetric matrix A. +// +// The right eigenvector v_j of A corresponding to an eigenvalue λ_j +// is defined by +// A v_j = λ_j v_j, +// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by +// u_j^H A = λ_j u_j^H, +// where u_j^H is the conjugate transpose of u_j. +// +// On return, A will be overwritten and the left and right eigenvectors will be +// stored, respectively, in the columns of the n×n matrices VL and VR in the +// same order as their eigenvalues. If the j-th eigenvalue is real, then +// u_j = VL[:,j], +// v_j = VR[:,j], +// and if it is not real, then j and j+1 form a complex conjugate pair and the +// eigenvectors can be recovered as +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// where i is the imaginary unit. The computed eigenvectors are normalized to +// have Euclidean norm equal to 1 and largest component real. 
+// +// Left eigenvectors will be computed only if jobvl == lapack.LeftEVCompute, +// otherwise jobvl must be lapack.LeftEVNone. +// Right eigenvectors will be computed only if jobvr == lapack.RightEVCompute, +// otherwise jobvr must be lapack.RightEVNone. +// For other values of jobvl and jobvr Geev will panic. +// +// On return, wr and wi will contain the real and imaginary parts, respectively, +// of the computed eigenvalues. Complex conjugate pairs of eigenvalues appear +// consecutively with the eigenvalue having the positive imaginary part first. +// wr and wi must have length n, and Geev will panic otherwise. +// +// work must have length at least lwork and lwork must be at least max(1,4*n) if +// the left or right eigenvectors are computed, and at least max(1,3*n) if no +// eigenvectors are computed. For good performance, lwork must generally be +// larger. On return, optimal value of lwork will be stored in work[0]. +// +// If lwork == -1, instead of performing Geev, the function only calculates the +// optimal vaule of lwork and stores it into work[0]. +// +// On return, first will be the index of the first valid eigenvalue. +// If first == 0, all eigenvalues and eigenvectors have been computed. +// If first is positive, Geev failed to compute all the eigenvalues, no +// eigenvectors have been computed and wr[first:] and wi[first:] contain those +// eigenvalues which have converged. +func Geev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, a blas64.General, wr, wi []float64, vl, vr blas64.General, work []float64, lwork int) (first int) { + n := a.Rows + if a.Cols != n { + panic("lapack64: matrix not square") + } + if jobvl == lapack.LeftEVCompute && (vl.Rows != n || vl.Cols != n) { + panic("lapack64: bad size of VL") + } + if jobvr == lapack.RightEVCompute && (vr.Rows != n || vr.Cols != n) { + panic("lapack64: bad size of VR") + } + return lapack64.Dgeev(jobvl, jobvr, n, a.Data, max(1, a.Stride), wr, wi, vl.Data, max(1, vl.Stride), vr.Data, max(1, vr.Stride), work, lwork) +} diff --git a/vendor/gonum.org/v1/gonum/mat/README.md b/vendor/gonum.org/v1/gonum/mat/README.md new file mode 100644 index 00000000..0f77e470 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/README.md @@ -0,0 +1,3 @@ +# Gonum matrix [![GoDoc](https://godoc.org/gonum.org/v1/gonum/mat?status.svg)](https://godoc.org/gonum.org/v1/gonum/mat) + +Package mat is a matrix package for the Go language. diff --git a/vendor/gonum.org/v1/gonum/mat/band.go b/vendor/gonum.org/v1/gonum/mat/band.go new file mode 100644 index 00000000..72ebefdd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/band.go @@ -0,0 +1,263 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + bandDense *BandDense + _ Matrix = bandDense + _ Banded = bandDense + _ RawBander = bandDense + + _ NonZeroDoer = bandDense + _ RowNonZeroDoer = bandDense + _ ColNonZeroDoer = bandDense +) + +// BandDense represents a band matrix in dense storage format. +type BandDense struct { + mat blas64.Band +} + +// Banded is a band matrix representation. +type Banded interface { + Matrix + // Bandwidth returns the lower and upper bandwidth values for + // the matrix. The total bandwidth of the matrix is kl+ku+1. + Bandwidth() (kl, ku int) + + // TBand is the equivalent of the T() method in the Matrix + // interface but guarantees the transpose is of banded type. 
+ TBand() Banded +} + +// A RawBander can return a blas64.Band representation of the receiver. +// Changes to the blas64.Band.Data slice will be reflected in the original +// matrix, changes to the Rows, Cols, KL, KU and Stride fields will not. +type RawBander interface { + RawBand() blas64.Band +} + +// A MutableBanded can set elements of a band matrix. +type MutableBanded interface { + Banded + SetBand(i, j int, v float64) +} + +var ( + _ Matrix = TransposeBand{} + _ Banded = TransposeBand{} + _ UntransposeBander = TransposeBand{} +) + +// TransposeBand is a type for performing an implicit transpose of a band +// matrix. It implements the Banded interface, returning values from the +// transpose of the matrix within. +type TransposeBand struct { + Banded Banded +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Banded field. +func (t TransposeBand) At(i, j int) float64 { + return t.Banded.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. +func (t TransposeBand) Dims() (r, c int) { + c, r = t.Banded.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Banded field. +func (t TransposeBand) T() Matrix { + return t.Banded +} + +// Bandwidth returns the lower and upper bandwidth values for +// the transposed matrix. +func (t TransposeBand) Bandwidth() (kl, ku int) { + kl, ku = t.Banded.Bandwidth() + return ku, kl +} + +// TBand performs an implicit transpose by returning the Banded field. +func (t TransposeBand) TBand() Banded { + return t.Banded +} + +// Untranspose returns the Banded field. +func (t TransposeBand) Untranspose() Matrix { + return t.Banded +} + +// UntransposeBand returns the Banded field. +func (t TransposeBand) UntransposeBand() Banded { + return t.Banded +} + +// NewBandDense creates a new Band matrix with r rows and c columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == min(r, c+kl)*(kl+ku+1), +// data is used as the backing slice, and changes to the elements of the returned +// BandDense will be reflected in data. If neither of these is true, NewBandDense +// will panic. kl must be at least zero and less r, and ku must be at least zero and +// less than c, otherwise NewBandDense will panic. +// NewBandDense will panic if either r or c is zero. +// +// The data must be arranged in row-major order constructed by removing the zeros +// from the rows outside the band and aligning the diagonals. For example, the matrix +// 1 2 3 0 0 0 +// 4 5 6 7 0 0 +// 0 8 9 10 11 0 +// 0 0 12 13 14 15 +// 0 0 0 16 17 18 +// 0 0 0 0 19 20 +// becomes (* entries are never accessed) +// * 1 2 3 +// 4 5 6 7 +// 8 9 10 11 +// 12 13 14 15 +// 16 17 18 * +// 19 20 * * +// which is passed to NewBandDense as []float64{*, 1, 2, 3, 4, ...} with kl=1 and ku=2. +// Only the values in the band portion of the matrix are used. 
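+//
+// As a sketch, the 6×6 example above (kl = 1, ku = 2) could be constructed as
+//  data := []float64{
+//   -1, 1, 2, 3, // the leading * entry is never accessed; any value may be used
+//    4, 5, 6, 7,
+//    8, 9, 10, 11,
+//    12, 13, 14, 15,
+//    16, 17, 18, -1,
+//    19, 20, -1, -1,
+//  }
+//  b := NewBandDense(6, 6, 1, 2, data)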
+func NewBandDense(r, c, kl, ku int, data []float64) *BandDense { + if r <= 0 || c <= 0 || kl < 0 || ku < 0 { + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if kl+1 > r || ku+1 > c { + panic("mat: band out of range") + } + bc := kl + ku + 1 + if data != nil && len(data) != min(r, c+kl)*bc { + panic(ErrShape) + } + if data == nil { + data = make([]float64, min(r, c+kl)*bc) + } + return &BandDense{ + mat: blas64.Band{ + Rows: r, + Cols: c, + KL: kl, + KU: ku, + Stride: bc, + Data: data, + }, + } +} + +// NewDiagonalRect is a convenience function that returns a diagonal matrix represented by a +// BandDense. The length of data must be min(r, c) otherwise NewDiagonalRect will panic. +func NewDiagonalRect(r, c int, data []float64) *BandDense { + return NewBandDense(r, c, 0, 0, data) +} + +// Dims returns the number of rows and columns in the matrix. +func (b *BandDense) Dims() (r, c int) { + return b.mat.Rows, b.mat.Cols +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +func (b *BandDense) Bandwidth() (kl, ku int) { + return b.mat.KL, b.mat.KU +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (b *BandDense) T() Matrix { + return Transpose{b} +} + +// TBand performs an implicit transpose by returning the receiver inside a TransposeBand. +func (b *BandDense) TBand() Banded { + return TransposeBand{b} +} + +// RawBand returns the underlying blas64.Band used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.Band. +func (b *BandDense) RawBand() blas64.Band { + return b.mat +} + +// SetRawBand sets the underlying blas64.Band used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +func (b *BandDense) SetRawBand(mat blas64.Band) { + b.mat = mat +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (b *BandDense) DiagView() Diagonal { + n := min(b.mat.Rows, b.mat.Cols) + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: b.mat.Stride, + Data: b.mat.Data[b.mat.KL : (n-1)*b.mat.Stride+b.mat.KL+1], + }, + } +} + +// DoNonZero calls the function fn for each of the non-zero elements of b. The function fn +// takes a row/column index and the element value of b at (i, j). +func (b *BandDense) DoNonZero(fn func(i, j int, v float64)) { + for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ { + for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ { + v := b.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of b. The function fn +// takes a row/column index and the element value of b at (i, j). +func (b *BandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || b.mat.Rows <= i { + panic(ErrRowAccess) + } + for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ { + v := b.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of b. The function fn +// takes a row/column index and the element value of b at (i, j). 
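+//
+// A short usage sketch (assuming a *BandDense b and the fmt package):
+//  b.DoColNonZero(2, func(i, j int, v float64) {
+//   fmt.Printf("b[%d,%d] = %v\n", i, j, v)
+//  })
+// visits the non-zero entries of column 2 that lie within the band.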
+func (b *BandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || b.mat.Cols <= j { + panic(ErrColAccess) + } + for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ { + if i-b.mat.KL <= j && j < i+b.mat.KU+1 { + v := b.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// Zero sets all of the matrix elements to zero. +func (b *BandDense) Zero() { + m := b.mat.Rows + kL := b.mat.KL + nCol := b.mat.KU + 1 + kL + for i := 0; i < m; i++ { + l := max(0, kL-i) + u := min(nCol, m+kL-i) + zero(b.mat.Data[i*b.mat.Stride+l : i*b.mat.Stride+u]) + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/cdense.go b/vendor/gonum.org/v1/gonum/mat/cdense.go new file mode 100644 index 00000000..9c29d1af --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/cdense.go @@ -0,0 +1,168 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import "gonum.org/v1/gonum/blas/cblas128" + +// Dense is a dense matrix representation with complex data. +type CDense struct { + mat cblas128.General + + capRows, capCols int +} + +// Dims returns the number of rows and columns in the matrix. +func (m *CDense) Dims() (r, c int) { + return m.mat.Rows, m.mat.Cols +} + +// H performs an implicit conjugate transpose by returning the receiver inside a +// Conjugate. +func (m *CDense) H() CMatrix { + return Conjugate{m} +} + +// NewCDense creates a new complex Dense matrix with r rows and c columns. +// If data == nil, a new slice is allocated for the backing slice. +// If len(data) == r*c, data is used as the backing slice, and changes to the +// elements of the returned CDense will be reflected in data. +// If neither of these is true, NewCDense will panic. +// NewCDense will panic if either r or c is zero. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +func NewCDense(r, c int, data []complex128) *CDense { + if r <= 0 || c <= 0 { + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && r*c != len(data) { + panic(ErrShape) + } + if data == nil { + data = make([]complex128, r*c) + } + return &CDense{ + mat: cblas128.General{ + Rows: r, + Cols: c, + Stride: c, + Data: data, + }, + capRows: r, + capCols: c, + } +} + +// reuseAs resizes an empty matrix to a r×c matrix, +// or checks that a non-empty matrix is r×c. +// +// reuseAs must be kept in sync with reuseAsZeroed. +func (m *CDense) reuseAs(r, c int) { + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = cblas128.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useC(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } +} + +func (m *CDense) reuseAsZeroed(r, c int) { + // This must be kept in-sync with reuseAs. + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. 
+ panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = cblas128.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useZeroedC(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } + m.Zero() +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (m *CDense) Reset() { + // Row, Cols and Stride must be zeroed in unison. + m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0 + m.capRows, m.capCols = 0, 0 + m.mat.Data = m.mat.Data[:0] +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. CDense matrices can be zeroed using Reset. +func (m *CDense) IsZero() bool { + // It must be the case that m.Dims() returns + // zeros in this case. See comment in Reset(). + return m.mat.Stride == 0 +} + +// Zero sets all of the matrix elements to zero. +func (m *CDense) Zero() { + r := m.mat.Rows + c := m.mat.Cols + for i := 0; i < r; i++ { + zeroC(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + } +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. If a aliases the receiver +// and is a transposed Dense or VecDense, with a non-unitary increment, Copy will +// panic. +// +// See the Copier interface for more information. +func (m *CDense) Copy(a CMatrix) (r, c int) { + r, c = a.Dims() + if a == m { + return r, c + } + r = min(r, m.mat.Rows) + c = min(c, m.mat.Cols) + if r == 0 || c == 0 { + return 0, 0 + } + // TODO(btracey): Check for overlap when complex version exists. + // TODO(btracey): Add fast-paths. + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, a.At(i, j)) + } + } + return r, c +} diff --git a/vendor/gonum.org/v1/gonum/mat/cholesky.go b/vendor/gonum.org/v1/gonum/mat/cholesky.go new file mode 100644 index 00000000..bee43853 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/cholesky.go @@ -0,0 +1,673 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badTriangle = "mat: invalid triangle" + badCholesky = "mat: invalid Cholesky factorization" +) + +var ( + _ Matrix = (*Cholesky)(nil) + _ Symmetric = (*Cholesky)(nil) +) + +// Cholesky is a symmetric positive definite matrix represented by its +// Cholesky decomposition. +// +// The decomposition can be constructed using the Factorize method. The +// factorization itself can be extracted using the UTo or LTo methods, and the +// original symmetric matrix can be recovered with ToSym. +// +// Note that this matrix representation is useful for certain operations, in +// particular finding solutions to linear equations. It is very inefficient +// at other operations, in particular At is slow. +// +// Cholesky methods may only be called on a value that has been successfully +// initialized by a call to Factorize that has returned true. Calls to methods +// of an unsuccessful Cholesky factorization will panic. 
+type Cholesky struct { + // The chol pointer must never be retained as a pointer outside the Cholesky + // struct, either by returning chol outside the struct or by setting it to + // a pointer coming from outside. The same prohibition applies to the data + // slice within chol. + chol *TriDense + cond float64 +} + +// updateCond updates the condition number of the Cholesky decomposition. If +// norm > 0, then that norm is used as the norm of the original matrix A, otherwise +// the norm is estimated from the decomposition. +func (c *Cholesky) updateCond(norm float64) { + n := c.chol.mat.N + work := getFloats(3*n, false) + defer putFloats(work) + if norm < 0 { + // This is an approximation. By the definition of a norm, + // |AB| <= |A| |B|. + // Since A = U^T*U, we get for the condition number κ that + // κ(A) := |A| |A^-1| = |U^T*U| |A^-1| <= |U^T| |U| |A^-1|, + // so this will overestimate the condition number somewhat. + // The norm of the original factorized matrix cannot be stored + // because of update possibilities. + unorm := lapack64.Lantr(CondNorm, c.chol.mat, work) + lnorm := lapack64.Lantr(CondNormTrans, c.chol.mat, work) + norm = unorm * lnorm + } + sym := c.chol.asSymBlas() + iwork := getInts(n, false) + v := lapack64.Pocon(sym, norm, work, iwork) + putInts(iwork) + c.cond = 1 / v +} + +// Dims returns the dimensions of the matrix. +func (ch *Cholesky) Dims() (r, c int) { + if !ch.valid() { + panic(badCholesky) + } + r, c = ch.chol.Dims() + return r, c +} + +// At returns the element at row i, column j. +func (c *Cholesky) At(i, j int) float64 { + if !c.valid() { + panic(badCholesky) + } + n := c.Symmetric() + if uint(i) >= uint(n) { + panic(ErrRowAccess) + } + if uint(j) >= uint(n) { + panic(ErrColAccess) + } + + var val float64 + for k := 0; k <= min(i, j); k++ { + val += c.chol.at(k, i) * c.chol.at(k, j) + } + return val +} + +// T returns the the receiver, the transpose of a symmetric matrix. +func (c *Cholesky) T() Matrix { + return c +} + +// Symmetric implements the Symmetric interface and returns the number of rows +// in the matrix (this is also the number of columns). +func (c *Cholesky) Symmetric() int { + r, _ := c.chol.Dims() + return r +} + +// Cond returns the condition number of the factorized matrix. +func (c *Cholesky) Cond() float64 { + if !c.valid() { + panic(badCholesky) + } + return c.cond +} + +// Factorize calculates the Cholesky decomposition of the matrix A and returns +// whether the matrix is positive definite. If Factorize returns false, the +// factorization must not be used. +func (c *Cholesky) Factorize(a Symmetric) (ok bool) { + n := a.Symmetric() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + copySymIntoTriangle(c.chol, a) + + sym := c.chol.asSymBlas() + work := getFloats(c.chol.mat.N, false) + norm := lapack64.Lansy(CondNorm, sym, work) + putFloats(work) + _, ok = lapack64.Potrf(sym) + if ok { + c.updateCond(norm) + } else { + c.Reset() + } + return ok +} + +// Reset resets the factorization so that it can be reused as the receiver of a +// dimensionally restricted operation. +func (c *Cholesky) Reset() { + if c.chol != nil { + c.chol.Reset() + } + c.cond = math.Inf(1) +} + +// SetFromU sets the Cholesky decomposition from the given triangular matrix. +// SetFromU panics if t is not upper triangular. Note that t is copied into, +// not stored inside, the receiver. 
+func (c *Cholesky) SetFromU(t *TriDense) { + n, kind := t.Triangle() + if kind != Upper { + panic("cholesky: matrix must be upper triangular") + } + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + c.chol.Copy(t) + c.updateCond(-1) +} + +// Clone makes a copy of the input Cholesky into the receiver, overwriting the +// previous value of the receiver. Clone does not place any restrictions on receiver +// shape. Clone panics if the input Cholesky is not the result of a valid decomposition. +func (c *Cholesky) Clone(chol *Cholesky) { + if !chol.valid() { + panic(badCholesky) + } + n := chol.Symmetric() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + c.chol.Copy(chol.chol) + c.cond = chol.cond +} + +// Det returns the determinant of the matrix that has been factorized. +func (c *Cholesky) Det() float64 { + if !c.valid() { + panic(badCholesky) + } + return math.Exp(c.LogDet()) +} + +// LogDet returns the log of the determinant of the matrix that has been factorized. +func (c *Cholesky) LogDet() float64 { + if !c.valid() { + panic(badCholesky) + } + var det float64 + for i := 0; i < c.chol.mat.N; i++ { + det += 2 * math.Log(c.chol.mat.Data[i*c.chol.mat.Stride+i]) + } + return det +} + +// SolveTo finds the matrix X that solves A * X = B where A is represented +// by the Cholesky decomposition. The result is stored in-place into dst. +func (c *Cholesky) SolveTo(dst *Dense, b Matrix) error { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + bm, bn := b.Dims() + if n != bm { + panic(ErrShape) + } + + dst.reuseAs(bm, bn) + if b != dst { + dst.Copy(b) + } + lapack64.Potrs(c.chol.mat, dst.mat) + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil +} + +// SolveCholTo finds the matrix X that solves A * X = B where A and B are represented +// by their Cholesky decompositions a and b. The result is stored in-place into +// dst. +func (a *Cholesky) SolveCholTo(dst *Dense, b *Cholesky) error { + if !a.valid() || !b.valid() { + panic(badCholesky) + } + bn := b.chol.mat.N + if a.chol.mat.N != bn { + panic(ErrShape) + } + + dst.reuseAsZeroed(bn, bn) + dst.Copy(b.chol.T()) + blas64.Trsm(blas.Left, blas.Trans, 1, a.chol.mat, dst.mat) + blas64.Trsm(blas.Left, blas.NoTrans, 1, a.chol.mat, dst.mat) + blas64.Trmm(blas.Right, blas.NoTrans, 1, b.chol.mat, dst.mat) + if a.cond > ConditionTolerance { + return Condition(a.cond) + } + return nil +} + +// SolveVecTo finds the vector X that solves A * x = b where A is represented +// by the Cholesky decomposition. The result is stored in-place into +// dst. +func (c *Cholesky) SolveVecTo(dst *VecDense, b Vector) error { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if br, bc := b.Dims(); br != n || bc != 1 { + panic(ErrShape) + } + switch rv := b.(type) { + default: + dst.reuseAs(n) + return c.SolveTo(dst.asDense(), b) + case RawVectorer: + bmat := rv.RawVector() + if dst != b { + dst.checkOverlap(bmat) + } + dst.reuseAs(n) + if dst != b { + dst.CopyVec(b) + } + lapack64.Potrs(c.chol.mat, dst.asGeneral()) + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil + } +} + +// RawU returns the Triangular matrix used to store the Cholesky decomposition of +// the original matrix A. The returned matrix should not be modified. If it is +// modified, the decomposition is invalid and should not be used. 
+func (c *Cholesky) RawU() Triangular { + return c.chol +} + +// UTo extracts the n×n upper triangular matrix U from a Cholesky +// decomposition into dst and returns the result. If dst is nil a new +// TriDense is allocated. +// A = U^T * U. +func (c *Cholesky) UTo(dst *TriDense) *TriDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewTriDense(n, Upper, make([]float64, n*n)) + } else { + dst.reuseAs(n, Upper) + } + dst.Copy(c.chol) + return dst +} + +// LTo extracts the n×n lower triangular matrix L from a Cholesky +// decomposition into dst and returns the result. If dst is nil a new +// TriDense is allocated. +// A = L * L^T. +func (c *Cholesky) LTo(dst *TriDense) *TriDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewTriDense(n, Lower, make([]float64, n*n)) + } else { + dst.reuseAs(n, Lower) + } + dst.Copy(c.chol.TTri()) + return dst +} + +// ToSym reconstructs the original positive definite matrix given its +// Cholesky decomposition into dst and returns the result. If dst is nil +// a new SymDense is allocated. +func (c *Cholesky) ToSym(dst *SymDense) *SymDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewSymDense(n, nil) + } else { + dst.reuseAs(n) + } + // Create a TriDense representing the Cholesky factor U with dst's + // backing slice. + // Operations on u are reflected in s. + u := &TriDense{ + mat: blas64.Triangular{ + Uplo: blas.Upper, + Diag: blas.NonUnit, + N: n, + Data: dst.mat.Data, + Stride: dst.mat.Stride, + }, + cap: n, + } + u.Copy(c.chol) + // Compute the product U^T*U using the algorithm from LAPACK/TESTING/LIN/dpot01.f + a := u.mat.Data + lda := u.mat.Stride + bi := blas64.Implementation() + for k := n - 1; k >= 0; k-- { + a[k*lda+k] = bi.Ddot(k+1, a[k:], lda, a[k:], lda) + if k > 0 { + bi.Dtrmv(blas.Upper, blas.Trans, blas.NonUnit, k, a, lda, a[k:], lda) + } + } + return dst +} + +// InverseTo computes the inverse of the matrix represented by its Cholesky +// factorization and stores the result into s. If the factorized +// matrix is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally be +// avoided where possible, for example by using the Solve routines. +func (c *Cholesky) InverseTo(s *SymDense) error { + if !c.valid() { + panic(badCholesky) + } + s.reuseAs(c.chol.mat.N) + // Create a TriDense representing the Cholesky factor U with the backing + // slice from s. + // Operations on u are reflected in s. + u := &TriDense{ + mat: blas64.Triangular{ + Uplo: blas.Upper, + Diag: blas.NonUnit, + N: s.mat.N, + Data: s.mat.Data, + Stride: s.mat.Stride, + }, + cap: s.mat.N, + } + u.Copy(c.chol) + + _, ok := lapack64.Potri(u.mat) + if !ok { + return Condition(math.Inf(1)) + } + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil +} + +// Scale multiplies the original matrix A by a positive constant using +// its Cholesky decomposition, storing the result in-place into the receiver. +// That is, if the original Cholesky factorization is +// U^T * U = A +// the updated factorization is +// U'^T * U' = f A = A' +// Scale panics if the constant is non-positive, or if the receiver is non-zero +// and is of a different size from the input. 
+func (c *Cholesky) Scale(f float64, orig *Cholesky) { + if !orig.valid() { + panic(badCholesky) + } + if f <= 0 { + panic("cholesky: scaling by a non-positive constant") + } + n := orig.Symmetric() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else if c.chol.mat.N != n { + panic(ErrShape) + } + c.chol.ScaleTri(math.Sqrt(f), orig.chol) + c.cond = orig.cond // Scaling by a positive constant does not change the condition number. +} + +// ExtendVecSym computes the Cholesky decomposition of the original matrix A, +// whose Cholesky decomposition is in a, extended by a the n×1 vector v according to +// [A w] +// [w' k] +// where k = v[n-1] and w = v[:n-1]. The result is stored into the receiver. +// In order for the updated matrix to be positive definite, it must be the case +// that k > w' A^-1 w. If this condition does not hold then ExtendVecSym will +// return false and the receiver will not be updated. +// +// ExtendVecSym will panic if v.Len() != a.Symmetric()+1 or if a does not contain +// a valid decomposition. +func (c *Cholesky) ExtendVecSym(a *Cholesky, v Vector) (ok bool) { + n := a.Symmetric() + + if v.Len() != n+1 { + panic(badSliceLength) + } + if !a.valid() { + panic(badCholesky) + } + + // The algorithm is commented here, but see also + // https://math.stackexchange.com/questions/955874/cholesky-factor-when-adding-a-row-and-column-to-already-factorized-matrix + // We have A and want to compute the Cholesky of + // [A w] + // [w' k] + // We want + // [U c] + // [0 d] + // to be the updated Cholesky, and so it must be that + // [A w] = [U' 0] [U c] + // [w' k] [c' d] [0 d] + // Thus, we need + // 1) A = U'U (true by the original decomposition being valid), + // 2) U' * c = w => c = U'^-1 w + // 3) c'*c + d'*d = k => d = sqrt(k-c'*c) + + // First, compute c = U'^-1 a + // TODO(btracey): Replace this with CopyVec when issue 167 is fixed. + w := NewVecDense(n, nil) + for i := 0; i < n; i++ { + w.SetVec(i, v.At(i, 0)) + } + k := v.At(n, 0) + + var t VecDense + t.SolveVec(a.chol.T(), w) + + dot := Dot(&t, &t) + if dot >= k { + return false + } + d := math.Sqrt(k - dot) + + newU := NewTriDense(n+1, Upper, nil) + newU.Copy(a.chol) + for i := 0; i < n; i++ { + newU.SetTri(i, n, t.At(i, 0)) + } + newU.SetTri(n, n, d) + c.chol = newU + c.updateCond(-1) + return true +} + +// SymRankOne performs a rank-1 update of the original matrix A and refactorizes +// its Cholesky factorization, storing the result into the receiver. That is, if +// in the original Cholesky factorization +// U^T * U = A, +// in the updated factorization +// U'^T * U' = A + alpha * x * x^T = A'. +// +// Note that when alpha is negative, the updating problem may be ill-conditioned +// and the results may be inaccurate, or the updated matrix A' may not be +// positive definite and not have a Cholesky factorization. SymRankOne returns +// whether the updated matrix A' is positive definite. +// +// SymRankOne updates a Cholesky factorization in O(n²) time. The Cholesky +// factorization computation from scratch is O(n³). 
+func (c *Cholesky) SymRankOne(orig *Cholesky, alpha float64, x Vector) (ok bool) {
+	if !orig.valid() {
+		panic(badCholesky)
+	}
+	n := orig.Symmetric()
+	if r, c := x.Dims(); r != n || c != 1 {
+		panic(ErrShape)
+	}
+	if orig != c {
+		if c.chol == nil {
+			c.chol = NewTriDense(n, Upper, nil)
+		} else if c.chol.mat.N != n {
+			panic(ErrShape)
+		}
+		c.chol.Copy(orig.chol)
+	}
+
+	if alpha == 0 {
+		return true
+	}
+
+	// Algorithms for updating and downdating the Cholesky factorization are
+	// described, for example, in
+	// - J. J. Dongarra, J. R. Bunch, C. B. Moler, G. W. Stewart: LINPACK
+	//   Users' Guide. SIAM (1979), pages 10.10--10.14
+	// or
+	// - P. E. Gill, G. H. Golub, W. Murray, and M. A. Saunders: Methods for
+	//   modifying matrix factorizations. Mathematics of Computation 28(126)
+	//   (1974), Method C3 on page 521
+	//
+	// The implementation is based on LINPACK code
+	// http://www.netlib.org/linpack/dchud.f
+	// http://www.netlib.org/linpack/dchdd.f
+	// and
+	// https://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646
+	//
+	// According to http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00301.html
+	// LINPACK is released under BSD license.
+	//
+	// See also:
+	// - M. A. Saunders: Large-scale Linear Programming Using the Cholesky
+	//   Factorization. Technical Report Stanford University (1972)
+	//   http://i.stanford.edu/pub/cstr/reports/cs/tr/72/252/CS-TR-72-252.pdf
+	// - Matthias Seeger: Low rank updates for the Cholesky decomposition.
+	//   EPFL Technical Report 161468 (2004)
+	//   http://infoscience.epfl.ch/record/161468
+
+	work := getFloats(n, false)
+	defer putFloats(work)
+	var xmat blas64.Vector
+	if rv, ok := x.(RawVectorer); ok {
+		xmat = rv.RawVector()
+	} else {
+		// Copy x into a newly allocated length-n vector; calling CopyVec on a
+		// nil *VecDense would panic.
+		tmp := NewVecDense(n, nil)
+		tmp.CopyVec(x)
+		xmat = tmp.RawVector()
+	}
+	blas64.Copy(xmat, blas64.Vector{N: n, Data: work, Inc: 1})
+
+	if alpha > 0 {
+		// Compute rank-1 update.
+		if alpha != 1 {
+			blas64.Scal(math.Sqrt(alpha), blas64.Vector{N: n, Data: work, Inc: 1})
+		}
+		umat := c.chol.mat
+		stride := umat.Stride
+		for i := 0; i < n; i++ {
+			// Compute parameters of the Givens matrix that zeroes
+			// the i-th element of x.
+			c, s, r, _ := blas64.Rotg(umat.Data[i*stride+i], work[i])
+			if r < 0 {
+				// Multiply by -1 to have positive diagonal
+				// elements.
+				r *= -1
+				c *= -1
+				s *= -1
+			}
+			umat.Data[i*stride+i] = r
+			if i < n-1 {
+				// Multiply the extended factorization matrix by
+				// the Givens matrix from the left. Only
+				// the i-th row and x are modified.
+				blas64.Rot(
+					blas64.Vector{N: n - i - 1, Data: umat.Data[i*stride+i+1 : i*stride+n], Inc: 1},
+					blas64.Vector{N: n - i - 1, Data: work[i+1 : n], Inc: 1},
+					c, s)
+			}
+		}
+		c.updateCond(-1)
+		return true
+	}
+
+	// Compute rank-1 downdate.
+	alpha = math.Sqrt(-alpha)
+	if alpha != 1 {
+		blas64.Scal(alpha, blas64.Vector{N: n, Data: work, Inc: 1})
+	}
+	// Solve U^T * p = x storing the result into work.
+	ok = lapack64.Trtrs(blas.Trans, c.chol.RawTriangular(), blas64.General{
+		Rows:   n,
+		Cols:   1,
+		Stride: 1,
+		Data:   work,
+	})
+	if !ok {
+		// The original matrix is singular. Should not happen, because
+		// the factorization is valid.
+		panic(badCholesky)
+	}
+	norm := blas64.Nrm2(blas64.Vector{N: n, Data: work, Inc: 1})
+	if norm >= 1 {
+		// The updated matrix is not positive definite.
+ return false + } + norm = math.Sqrt((1 + norm) * (1 - norm)) + cos := getFloats(n, false) + defer putFloats(cos) + sin := getFloats(n, false) + defer putFloats(sin) + for i := n - 1; i >= 0; i-- { + // Compute parameters of Givens matrices that zero elements of p + // backwards. + cos[i], sin[i], norm, _ = blas64.Rotg(norm, work[i]) + if norm < 0 { + norm *= -1 + cos[i] *= -1 + sin[i] *= -1 + } + } + umat := c.chol.mat + stride := umat.Stride + for i := n - 1; i >= 0; i-- { + work[i] = 0 + // Apply Givens matrices to U. + // TODO(vladimir-ch): Use workspace to avoid modifying the + // receiver in case an invalid factorization is created. + blas64.Rot( + blas64.Vector{N: n - i, Data: work[i:n], Inc: 1}, + blas64.Vector{N: n - i, Data: umat.Data[i*stride+i : i*stride+n], Inc: 1}, + cos[i], sin[i]) + if umat.Data[i*stride+i] == 0 { + // The matrix is singular (may rarely happen due to + // floating-point effects?). + ok = false + } else if umat.Data[i*stride+i] < 0 { + // Diagonal elements should be positive. If it happens + // that on the i-th row the diagonal is negative, + // multiply U from the left by an identity matrix that + // has -1 on the i-th row. + blas64.Scal(-1, blas64.Vector{N: n - i, Data: umat.Data[i*stride+i : i*stride+n], Inc: 1}) + } + } + if ok { + c.updateCond(-1) + } else { + c.Reset() + } + return ok +} + +func (c *Cholesky) valid() bool { + return c.chol != nil && !c.chol.IsZero() +} diff --git a/vendor/gonum.org/v1/gonum/mat/cmatrix.go b/vendor/gonum.org/v1/gonum/mat/cmatrix.go new file mode 100644 index 00000000..6219c28a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/cmatrix.go @@ -0,0 +1,210 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + "math/cmplx" + + "gonum.org/v1/gonum/floats" +) + +// CMatrix is the basic matrix interface type for complex matrices. +type CMatrix interface { + // Dims returns the dimensions of a Matrix. + Dims() (r, c int) + + // At returns the value of a matrix element at row i, column j. + // It will panic if i or j are out of bounds for the matrix. + At(i, j int) complex128 + + // H returns the conjugate transpose of the Matrix. Whether H + // returns a copy of the underlying data is implementation dependent. + // This method may be implemented using the Conjugate type, which + // provides an implicit matrix conjugate transpose. + H() CMatrix +} + +var ( + _ CMatrix = Conjugate{} + _ Unconjugator = Conjugate{} +) + +// Conjugate is a type for performing an implicit matrix conjugate transpose. +// It implements the Matrix interface, returning values from the conjugate +// transpose of the matrix within. +type Conjugate struct { + CMatrix CMatrix +} + +// At returns the value of the element at row i and column j of the conjugate +// transposed matrix, that is, row j and column i of the Matrix field. +func (t Conjugate) At(i, j int) complex128 { + z := t.CMatrix.At(j, i) + return cmplx.Conj(z) +} + +// Dims returns the dimensions of the transposed matrix. The number of rows returned +// is the number of columns in the Matrix field, and the number of columns is +// the number of rows in the Matrix field. +func (t Conjugate) Dims() (r, c int) { + c, r = t.CMatrix.Dims() + return r, c +} + +// H performs an implicit conjugate transpose by returning the Matrix field. +func (t Conjugate) H() CMatrix { + return t.CMatrix +} + +// Unconjugate returns the Matrix field. 
+func (t Conjugate) Unconjugate() CMatrix { + return t.CMatrix +} + +// Unconjugator is a type that can undo an implicit conjugate transpose. +type Unconjugator interface { + // Note: This interface is needed to unify all of the Conjugate types. In + // the cmat128 methods, we need to test if the Matrix has been implicitly + // transposed. If this is checked by testing for the specific Conjugate type + // then the behavior will be different if the user uses H() or HTri() for a + // triangular matrix. + + // Unconjugate returns the underlying Matrix stored for the implicit + // conjugate transpose. + Unconjugate() CMatrix +} + +// useC returns a complex128 slice with l elements, using c if it +// has the necessary capacity, otherwise creating a new slice. +func useC(c []complex128, l int) []complex128 { + if l <= cap(c) { + return c[:l] + } + return make([]complex128, l) +} + +// useZeroedC returns a complex128 slice with l elements, using c if it +// has the necessary capacity, otherwise creating a new slice. The +// elements of the returned slice are guaranteed to be zero. +func useZeroedC(c []complex128, l int) []complex128 { + if l <= cap(c) { + c = c[:l] + zeroC(c) + return c + } + return make([]complex128, l) +} + +// zeroC zeros the given slice's elements. +func zeroC(c []complex128) { + for i := range c { + c[i] = 0 + } +} + +// unconjugate unconjugates a matrix if applicable. If a is an Unconjugator, then +// unconjugate returns the underlying matrix and true. If it is not, then it returns +// the input matrix and false. +func unconjugate(a CMatrix) (CMatrix, bool) { + if ut, ok := a.(Unconjugator); ok { + return ut.Unconjugate(), true + } + return a, false +} + +// CEqual returns whether the matrices a and b have the same size +// and are element-wise equal. +func CEqual(a, b CMatrix) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + // TODO(btracey): Add in fast-paths. + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if a.At(i, j) != b.At(i, j) { + return false + } + } + } + return true +} + +// CEqualApprox returns whether the matrices a and b have the same size and contain all equal +// elements with tolerance for element-wise equality specified by epsilon. Matrices +// with non-equal shapes are not equal. +func CEqualApprox(a, b CMatrix, epsilon float64) bool { + // TODO(btracey): + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if !cEqualWithinAbsOrRel(a.At(i, j), b.At(i, j), epsilon, epsilon) { + return false + } + } + } + return true +} + +// TODO(btracey): Move these into a cmplxs if/when we have one. + +func cEqualWithinAbsOrRel(a, b complex128, absTol, relTol float64) bool { + if cEqualWithinAbs(a, b, absTol) { + return true + } + return cEqualWithinRel(a, b, relTol) +} + +// cEqualWithinAbs returns true if a and b have an absolute +// difference of less than tol. +func cEqualWithinAbs(a, b complex128, tol float64) bool { + return a == b || cmplx.Abs(a-b) <= tol +} + +const minNormalFloat64 = 2.2250738585072014e-308 + +// cEqualWithinRel returns true if the difference between a and b +// is not greater than tol times the greater value. +func cEqualWithinRel(a, b complex128, tol float64) bool { + if a == b { + return true + } + if cmplx.IsNaN(a) || cmplx.IsNaN(b) { + return false + } + // Cannot play the same trick as in floats because there are multiple + // possible infinities. 
+ if cmplx.IsInf(a) { + if !cmplx.IsInf(b) { + return false + } + ra := real(a) + if math.IsInf(ra, 0) { + if ra == real(b) { + return floats.EqualWithinRel(imag(a), imag(b), tol) + } + return false + } + if imag(a) == imag(b) { + return floats.EqualWithinRel(ra, real(b), tol) + } + return false + } + if cmplx.IsInf(b) { + return false + } + + delta := cmplx.Abs(a - b) + if delta <= minNormalFloat64 { + return delta <= tol*minNormalFloat64 + } + return delta/math.Max(cmplx.Abs(a), cmplx.Abs(b)) <= tol +} diff --git a/vendor/gonum.org/v1/gonum/mat/consts.go b/vendor/gonum.org/v1/gonum/mat/consts.go new file mode 100644 index 00000000..3de3f5bf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/consts.go @@ -0,0 +1,15 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +// TriKind represents the triangularity of the matrix. +type TriKind bool + +const ( + // Upper specifies an upper triangular matrix. + Upper TriKind = true + // Lower specifies a lower triangular matrix. + Lower TriKind = false +) diff --git a/vendor/gonum.org/v1/gonum/mat/dense.go b/vendor/gonum.org/v1/gonum/mat/dense.go new file mode 100644 index 00000000..87b1105c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/dense.go @@ -0,0 +1,558 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + dense *Dense + + _ Matrix = dense + _ Mutable = dense + + _ Cloner = dense + _ RowViewer = dense + _ ColViewer = dense + _ RawRowViewer = dense + _ Grower = dense + + _ RawMatrixSetter = dense + _ RawMatrixer = dense + + _ Reseter = dense +) + +// Dense is a dense matrix representation. +type Dense struct { + mat blas64.General + + capRows, capCols int +} + +// NewDense creates a new Dense matrix with r rows and c columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == r*c, data is +// used as the backing slice, and changes to the elements of the returned Dense +// will be reflected in data. If neither of these is true, NewDense will panic. +// NewDense will panic if either r or c is zero. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +func NewDense(r, c int, data []float64) *Dense { + if r <= 0 || c <= 0 { + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && r*c != len(data) { + panic(ErrShape) + } + if data == nil { + data = make([]float64, r*c) + } + return &Dense{ + mat: blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: data, + }, + capRows: r, + capCols: c, + } +} + +// reuseAs resizes an empty matrix to a r×c matrix, +// or checks that a non-empty matrix is r×c. +// +// reuseAs must be kept in sync with reuseAsZeroed. +func (m *Dense) reuseAs(r, c int) { + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. 
+ panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: use(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } +} + +// reuseAsZeroed resizes an empty matrix to a r×c matrix, +// or checks that a non-empty matrix is r×c. It zeroes +// all the elements of the matrix. +// +// reuseAsZeroed must be kept in sync with reuseAs. +func (m *Dense) reuseAsZeroed(r, c int) { + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useZeroed(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } + m.Zero() +} + +// Zero sets all of the matrix elements to zero. +func (m *Dense) Zero() { + r := m.mat.Rows + c := m.mat.Cols + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + } +} + +// isolatedWorkspace returns a new dense matrix w with the size of a and +// returns a callback to defer which performs cleanup at the return of the call. +// This should be used when a method receiver is the same pointer as an input argument. +func (m *Dense) isolatedWorkspace(a Matrix) (w *Dense, restore func()) { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + w = getWorkspace(r, c, false) + return w, func() { + m.Copy(w) + putWorkspace(w) + } +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (m *Dense) Reset() { + // Row, Cols and Stride must be zeroed in unison. + m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0 + m.capRows, m.capCols = 0, 0 + m.mat.Data = m.mat.Data[:0] +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. Dense matrices can be zeroed using Reset. +func (m *Dense) IsZero() bool { + // It must be the case that m.Dims() returns + // zeros in this case. See comment in Reset(). + return m.mat.Stride == 0 +} + +// asTriDense returns a TriDense with the given size and side. The backing data +// of the TriDense is the same as the receiver. +func (m *Dense) asTriDense(n int, diag blas.Diag, uplo blas.Uplo) *TriDense { + return &TriDense{ + mat: blas64.Triangular{ + N: n, + Stride: m.mat.Stride, + Data: m.mat.Data, + Uplo: uplo, + Diag: diag, + }, + cap: n, + } +} + +// DenseCopyOf returns a newly allocated copy of the elements of a. +func DenseCopyOf(a Matrix) *Dense { + d := &Dense{} + d.Clone(a) + return d +} + +// SetRawMatrix sets the underlying blas64.General used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in b. +func (m *Dense) SetRawMatrix(b blas64.General) { + m.capRows, m.capCols = b.Rows, b.Cols + m.mat = b +} + +// RawMatrix returns the underlying blas64.General used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.General. +func (m *Dense) RawMatrix() blas64.General { return m.mat } + +// Dims returns the number of rows and columns in the matrix. 
+func (m *Dense) Dims() (r, c int) { return m.mat.Rows, m.mat.Cols } + +// Caps returns the number of rows and columns in the backing matrix. +func (m *Dense) Caps() (r, c int) { return m.capRows, m.capCols } + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (m *Dense) T() Matrix { + return Transpose{m} +} + +// ColView returns a Vector reflecting the column j, backed by the matrix data. +// +// See ColViewer for more information. +func (m *Dense) ColView(j int) Vector { + var v VecDense + v.ColViewOf(m, j) + return &v +} + +// SetCol sets the values in the specified column of the matrix to the values +// in src. len(src) must equal the number of rows in the receiver. +func (m *Dense) SetCol(j int, src []float64) { + if j >= m.mat.Cols || j < 0 { + panic(ErrColAccess) + } + if len(src) != m.mat.Rows { + panic(ErrColLength) + } + + blas64.Copy( + blas64.Vector{N: m.mat.Rows, Inc: 1, Data: src}, + blas64.Vector{N: m.mat.Rows, Inc: m.mat.Stride, Data: m.mat.Data[j:]}, + ) +} + +// SetRow sets the values in the specified rows of the matrix to the values +// in src. len(src) must equal the number of columns in the receiver. +func (m *Dense) SetRow(i int, src []float64) { + if i >= m.mat.Rows || i < 0 { + panic(ErrRowAccess) + } + if len(src) != m.mat.Cols { + panic(ErrRowLength) + } + + copy(m.rawRowView(i), src) +} + +// RowView returns row i of the matrix data represented as a column vector, +// backed by the matrix data. +// +// See RowViewer for more information. +func (m *Dense) RowView(i int) Vector { + var v VecDense + v.RowViewOf(m, i) + return &v +} + +// RawRowView returns a slice backed by the same array as backing the +// receiver. +func (m *Dense) RawRowView(i int) []float64 { + if i >= m.mat.Rows || i < 0 { + panic(ErrRowAccess) + } + return m.rawRowView(i) +} + +func (m *Dense) rawRowView(i int) []float64 { + return m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+m.mat.Cols] +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (m *Dense) DiagView() Diagonal { + n := min(m.mat.Rows, m.mat.Cols) + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: m.mat.Stride + 1, + Data: m.mat.Data[:(n-1)*m.mat.Stride+n], + }, + } +} + +// Slice returns a new Matrix that shares backing data with the receiver. +// The returned matrix starts at {i,j} of the receiver and extends k-i rows +// and l-j columns. The final row in the resulting matrix is k-1 and the +// final column is l-1. +// Slice panics with ErrIndexOutOfRange if the slice is outside the capacity +// of the receiver. +func (m *Dense) Slice(i, k, j, l int) Matrix { + mr, mc := m.Caps() + if i < 0 || mr <= i || j < 0 || mc <= j || k < i || mr < k || l < j || mc < l { + if i == k || j == l { + panic(ErrZeroLength) + } + panic(ErrIndexOutOfRange) + } + t := *m + t.mat.Data = t.mat.Data[i*t.mat.Stride+j : (k-1)*t.mat.Stride+l] + t.mat.Rows = k - i + t.mat.Cols = l - j + t.capRows -= i + t.capCols -= j + return &t +} + +// Grow returns the receiver expanded by r rows and c columns. If the dimensions +// of the expanded matrix are outside the capacities of the receiver a new +// allocation is made, otherwise not. Note the receiver itself is not modified +// during the call to Grow. 
+func (m *Dense) Grow(r, c int) Matrix { + if r < 0 || c < 0 { + panic(ErrIndexOutOfRange) + } + if r == 0 && c == 0 { + return m + } + + r += m.mat.Rows + c += m.mat.Cols + + var t Dense + switch { + case m.mat.Rows == 0 || m.mat.Cols == 0: + t.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + // We zero because we don't know how the matrix will be used. + // In other places, the mat is immediately filled with a result; + // this is not the case here. + Data: useZeroed(m.mat.Data, r*c), + } + case r > m.capRows || c > m.capCols: + cr := max(r, m.capRows) + cc := max(c, m.capCols) + t.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: cc, + Data: make([]float64, cr*cc), + } + t.capRows = cr + t.capCols = cc + // Copy the complete matrix over to the new matrix. + // Including elements not currently visible. Use a temporary structure + // to avoid modifying the receiver. + var tmp Dense + tmp.mat = blas64.General{ + Rows: m.mat.Rows, + Cols: m.mat.Cols, + Stride: m.mat.Stride, + Data: m.mat.Data, + } + tmp.capRows = m.capRows + tmp.capCols = m.capCols + t.Copy(&tmp) + return &t + default: + t.mat = blas64.General{ + Data: m.mat.Data[:(r-1)*m.mat.Stride+c], + Rows: r, + Cols: c, + Stride: m.mat.Stride, + } + } + t.capRows = r + t.capCols = c + return &t +} + +// Clone makes a copy of a into the receiver, overwriting the previous value of +// the receiver. The clone operation does not make any restriction on shape and +// will not cause shadowing. +// +// See the Cloner interface for more information. +func (m *Dense) Clone(a Matrix) { + r, c := a.Dims() + mat := blas64.General{ + Rows: r, + Cols: c, + Stride: c, + } + m.capRows, m.capCols = r, c + + aU, trans := untranspose(a) + switch aU := aU.(type) { + case RawMatrixer: + amat := aU.RawMatrix() + mat.Data = make([]float64, r*c) + if trans { + for i := 0; i < r; i++ { + blas64.Copy(blas64.Vector{N: c, Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, + blas64.Vector{N: c, Inc: 1, Data: mat.Data[i*c : (i+1)*c]}) + } + } else { + for i := 0; i < r; i++ { + copy(mat.Data[i*c:(i+1)*c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + } + case *VecDense: + amat := aU.mat + mat.Data = make([]float64, aU.mat.N) + blas64.Copy(blas64.Vector{N: aU.mat.N, Inc: amat.Inc, Data: amat.Data}, + blas64.Vector{N: aU.mat.N, Inc: 1, Data: mat.Data}) + default: + mat.Data = make([]float64, r*c) + w := *m + w.mat = mat + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + w.set(i, j, a.At(i, j)) + } + } + *m = w + return + } + m.mat = mat +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. If a aliases the receiver +// and is a transposed Dense or VecDense, with a non-unitary increment, Copy will +// panic. +// +// See the Copier interface for more information. 
+func (m *Dense) Copy(a Matrix) (r, c int) { + r, c = a.Dims() + if a == m { + return r, c + } + r = min(r, m.mat.Rows) + c = min(c, m.mat.Cols) + if r == 0 || c == 0 { + return 0, 0 + } + + aU, trans := untranspose(a) + switch aU := aU.(type) { + case RawMatrixer: + amat := aU.RawMatrix() + if trans { + if amat.Stride != 1 { + m.checkOverlap(amat) + } + for i := 0; i < r; i++ { + blas64.Copy(blas64.Vector{N: c, Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, + blas64.Vector{N: c, Inc: 1, Data: m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]}) + } + } else { + switch o := offset(m.mat.Data, amat.Data); { + case o < 0: + for i := r - 1; i >= 0; i-- { + copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + case o > 0: + for i := 0; i < r; i++ { + copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + default: + // Nothing to do. + } + } + case *VecDense: + var n, stride int + amat := aU.mat + if trans { + if amat.Inc != 1 { + m.checkOverlap(aU.asGeneral()) + } + n = c + stride = 1 + } else { + n = r + stride = m.mat.Stride + } + if amat.Inc == 1 && stride == 1 { + copy(m.mat.Data, amat.Data[:n]) + break + } + switch o := offset(m.mat.Data, amat.Data); { + case o < 0: + blas64.Copy(blas64.Vector{N: n, Inc: -amat.Inc, Data: amat.Data}, + blas64.Vector{N: n, Inc: -stride, Data: m.mat.Data}) + case o > 0: + blas64.Copy(blas64.Vector{N: n, Inc: amat.Inc, Data: amat.Data}, + blas64.Vector{N: n, Inc: stride, Data: m.mat.Data}) + default: + // Nothing to do. + } + default: + m.checkOverlapMatrix(aU) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, a.At(i, j)) + } + } + } + + return r, c +} + +// Stack appends the rows of b onto the rows of a, placing the result into the +// receiver with b placed in the greater indexed rows. Stack will panic if the +// two input matrices do not have the same number of columns or the constructed +// stacked matrix is not the same shape as the receiver. +func (m *Dense) Stack(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ac != bc || m == a || m == b { + panic(ErrShape) + } + + m.reuseAs(ar+br, ac) + + m.Copy(a) + w := m.Slice(ar, ar+br, 0, bc).(*Dense) + w.Copy(b) +} + +// Augment creates the augmented matrix of a and b, where b is placed in the +// greater indexed columns. Augment will panic if the two input matrices do +// not have the same number of rows or the constructed augmented matrix is +// not the same shape as the receiver. +func (m *Dense) Augment(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || m == a || m == b { + panic(ErrShape) + } + + m.reuseAs(ar, ac+bc) + + m.Copy(a) + w := m.Slice(0, br, ac, ac+bc).(*Dense) + w.Copy(b) +} + +// Trace returns the trace of the matrix. The matrix must be square or Trace +// will panic. +func (m *Dense) Trace() float64 { + if m.mat.Rows != m.mat.Cols { + panic(ErrSquare) + } + // TODO(btracey): could use internal asm sum routine. + var v float64 + for i := 0; i < m.mat.Rows; i++ { + v += m.mat.Data[i*m.mat.Stride+i] + } + return v +} diff --git a/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go b/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go new file mode 100644 index 00000000..dd4526f6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go @@ -0,0 +1,886 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Add adds a and b element-wise, placing the result in the receiver. Add +// will panic if the two matrices do not have the same shape. +func (m *Dense) Add(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v + bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)+b.At(r, c)) + } + } +} + +// Sub subtracts the matrix b from a, placing the result in the receiver. Sub +// will panic if the two matrices do not have the same shape. +func (m *Dense) Sub(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v - bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)-b.At(r, c)) + } + } +} + +// MulElem performs element-wise multiplication of a and b, placing the result +// in the receiver. MulElem will panic if the two matrices do not have the same +// shape. 
+func (m *Dense) MulElem(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v * bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)*b.At(r, c)) + } + } +} + +// DivElem performs element-wise division of a by b, placing the result +// in the receiver. DivElem will panic if the two matrices do not have the same +// shape. +func (m *Dense) DivElem(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v / bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)/b.At(r, c)) + } + } +} + +// Inverse computes the inverse of the matrix a, storing the result into the +// receiver. If a is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally +// be avoided where possible, for example by using the Solve routines. +func (m *Dense) Inverse(a Matrix) error { + // TODO(btracey): Special case for RawTriangular, etc. + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + m.reuseAs(a.Dims()) + aU, aTrans := untranspose(a) + switch rm := aU.(type) { + case RawMatrixer: + if m != aU || aTrans { + if m == aU || m.checkOverlap(rm.RawMatrix()) { + tmp := getWorkspace(r, c, false) + tmp.Copy(a) + m.Copy(tmp) + putWorkspace(tmp) + break + } + m.Copy(a) + } + default: + m.Copy(a) + } + ipiv := getInts(r, false) + defer putInts(ipiv) + ok := lapack64.Getrf(m.mat, ipiv) + if !ok { + return Condition(math.Inf(1)) + } + work := getFloats(4*r, false) // must be at least 4*r for cond. 
+ lapack64.Getri(m.mat, ipiv, work, -1) + if int(work[0]) > 4*r { + l := int(work[0]) + putFloats(work) + work = getFloats(l, false) + } else { + work = work[:4*r] + } + defer putFloats(work) + lapack64.Getri(m.mat, ipiv, work, len(work)) + norm := lapack64.Lange(CondNorm, m.mat, work) + rcond := lapack64.Gecon(CondNorm, m.mat, norm, work, ipiv) // reuse ipiv + if rcond == 0 { + return Condition(math.Inf(1)) + } + cond := 1 / rcond + if cond > ConditionTolerance { + return Condition(cond) + } + return nil +} + +// Mul takes the matrix product of a and b, placing the result in the receiver. +// If the number of columns in a does not equal the number of rows in b, Mul will panic. +func (m *Dense) Mul(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + + if ac != br { + panic(ErrShape) + } + + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + m.reuseAs(ar, bc) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + aT := blas.NoTrans + if aTrans { + aT = blas.Trans + } + bT := blas.NoTrans + if bTrans { + bT = blas.Trans + } + + // Some of the cases do not have a transpose option, so create + // temporary memory. + // C = A^T * B = (B^T * A)^T + // C^T = B^T * A. + if aUrm, ok := aU.(RawMatrixer); ok { + amat := aUrm.RawMatrix() + if restore == nil { + m.checkOverlap(amat) + } + if bUrm, ok := bU.(RawMatrixer); ok { + bmat := bUrm.RawMatrix() + if restore == nil { + m.checkOverlap(bmat) + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + if bU, ok := bU.(RawSymmetricer); ok { + bmat := bU.RawSymmetric() + if aTrans { + c := getWorkspace(ac, ar, false) + blas64.Symm(blas.Left, 1, bmat, amat, 0, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + blas64.Symm(blas.Right, 1, bmat, amat, 0, m.mat) + return + } + if bU, ok := bU.(RawTriangular); ok { + // Trmm updates in place, so copy aU first. + bmat := bU.RawTriangular() + if aTrans { + c := getWorkspace(ac, ar, false) + var tmp Dense + tmp.SetRawMatrix(amat) + c.Copy(&tmp) + bT := blas.Trans + if bTrans { + bT = blas.NoTrans + } + blas64.Trmm(blas.Left, bT, 1, bmat, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + m.Copy(a) + blas64.Trmm(blas.Right, bT, 1, bmat, m.mat) + return + } + if bU, ok := bU.(*VecDense); ok { + m.checkOverlap(bU.asGeneral()) + bvec := bU.RawVector() + if bTrans { + // {ar,1} x {1,bc}, which is not a vector. + // Instead, construct B as a General. + bmat := blas64.General{ + Rows: bc, + Cols: 1, + Stride: bvec.Inc, + Data: bvec.Data, + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + cvec := blas64.Vector{ + Inc: m.mat.Stride, + Data: m.mat.Data, + } + blas64.Gemv(aT, 1, amat, bvec, 0, cvec) + return + } + } + if bUrm, ok := bU.(RawMatrixer); ok { + bmat := bUrm.RawMatrix() + if restore == nil { + m.checkOverlap(bmat) + } + if aU, ok := aU.(RawSymmetricer); ok { + amat := aU.RawSymmetric() + if bTrans { + c := getWorkspace(bc, br, false) + blas64.Symm(blas.Right, 1, amat, bmat, 0, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + blas64.Symm(blas.Left, 1, amat, bmat, 0, m.mat) + return + } + if aU, ok := aU.(RawTriangular); ok { + // Trmm updates in place, so copy bU first. 
+ amat := aU.RawTriangular() + if bTrans { + c := getWorkspace(bc, br, false) + var tmp Dense + tmp.SetRawMatrix(bmat) + c.Copy(&tmp) + aT := blas.Trans + if aTrans { + aT = blas.NoTrans + } + blas64.Trmm(blas.Right, aT, 1, amat, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + m.Copy(b) + blas64.Trmm(blas.Left, aT, 1, amat, m.mat) + return + } + if aU, ok := aU.(*VecDense); ok { + m.checkOverlap(aU.asGeneral()) + avec := aU.RawVector() + if aTrans { + // {1,ac} x {ac, bc} + // Transpose B so that the vector is on the right. + cvec := blas64.Vector{ + Inc: 1, + Data: m.mat.Data, + } + bT := blas.Trans + if bTrans { + bT = blas.NoTrans + } + blas64.Gemv(bT, 1, bmat, avec, 0, cvec) + return + } + // {ar,1} x {1,bc} which is not a vector result. + // Instead, construct A as a General. + amat := blas64.General{ + Rows: ar, + Cols: 1, + Stride: avec.Inc, + Data: avec.Data, + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + row := getFloats(ac, false) + defer putFloats(row) + for r := 0; r < ar; r++ { + for i := range row { + row[i] = a.At(r, i) + } + for c := 0; c < bc; c++ { + var v float64 + for i, e := range row { + v += e * b.At(i, c) + } + m.mat.Data[r*m.mat.Stride+c] = v + } + } +} + +// strictCopy copies a into m panicking if the shape of a and m differ. +func strictCopy(m *Dense, a Matrix) { + r, c := m.Copy(a) + if r != m.mat.Rows || c != m.mat.Cols { + // Panic with a string since this + // is not a user-facing panic. + panic(ErrShape.Error()) + } +} + +// Exp calculates the exponential of the matrix a, e^a, placing the result +// in the receiver. Exp will panic with matrix.ErrShape if a is not square. +func (m *Dense) Exp(a Matrix) { + // The implementation used here is from Functions of Matrices: Theory and Computation + // Chapter 10, Algorithm 10.20. https://doi.org/10.1137/1.9780898717778.ch10 + + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + + m.reuseAs(r, r) + if r == 1 { + m.mat.Data[0] = math.Exp(a.At(0, 0)) + return + } + + pade := []struct { + theta float64 + b []float64 + }{ + {theta: 0.015, b: []float64{ + 120, 60, 12, 1, + }}, + {theta: 0.25, b: []float64{ + 30240, 15120, 3360, 420, 30, 1, + }}, + {theta: 0.95, b: []float64{ + 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1, + }}, + {theta: 2.1, b: []float64{ + 17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1, + }}, + } + + a1 := m + a1.Copy(a) + v := getWorkspace(r, r, true) + vraw := v.RawMatrix() + n := r * r + vvec := blas64.Vector{N: n, Inc: 1, Data: vraw.Data} + defer putWorkspace(v) + + u := getWorkspace(r, r, true) + uraw := u.RawMatrix() + uvec := blas64.Vector{N: n, Inc: 1, Data: uraw.Data} + defer putWorkspace(u) + + a2 := getWorkspace(r, r, false) + defer putWorkspace(a2) + + n1 := Norm(a, 1) + for i, t := range pade { + if n1 > t.theta { + continue + } + + // This loop only executes once, so + // this is not as horrible as it looks. + p := getWorkspace(r, r, true) + praw := p.RawMatrix() + pvec := blas64.Vector{N: n, Inc: 1, Data: praw.Data} + defer putWorkspace(p) + + for k := 0; k < r; k++ { + p.set(k, k, 1) + v.set(k, k, t.b[0]) + u.set(k, k, t.b[1]) + } + + a2.Mul(a1, a1) + for j := 0; j <= i; j++ { + p.Mul(p, a2) + blas64.Axpy(t.b[2*j+2], pvec, vvec) + blas64.Axpy(t.b[2*j+3], pvec, uvec) + } + u.Mul(a1, u) + + // Use p as a workspace here and + // rename u for the second call's + // receiver. 
+ vmu, vpu := u, p + vpu.Add(v, u) + vmu.Sub(v, u) + + m.Solve(vmu, vpu) + return + } + + // Remaining Padé table line. + const theta13 = 5.4 + b := [...]float64{ + 64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, + 129060195264000, 10559470521600, 670442572800, 33522128640, + 1323241920, 40840800, 960960, 16380, 182, 1, + } + + s := math.Log2(n1 / theta13) + if s >= 0 { + s = math.Ceil(s) + a1.Scale(1/math.Pow(2, s), a1) + } + a2.Mul(a1, a1) + + i := getWorkspace(r, r, true) + for j := 0; j < r; j++ { + i.set(j, j, 1) + } + iraw := i.RawMatrix() + ivec := blas64.Vector{N: n, Inc: 1, Data: iraw.Data} + defer putWorkspace(i) + + a2raw := a2.RawMatrix() + a2vec := blas64.Vector{N: n, Inc: 1, Data: a2raw.Data} + + a4 := getWorkspace(r, r, false) + a4raw := a4.RawMatrix() + a4vec := blas64.Vector{N: n, Inc: 1, Data: a4raw.Data} + defer putWorkspace(a4) + a4.Mul(a2, a2) + + a6 := getWorkspace(r, r, false) + a6raw := a6.RawMatrix() + a6vec := blas64.Vector{N: n, Inc: 1, Data: a6raw.Data} + defer putWorkspace(a6) + a6.Mul(a2, a4) + + // V = A_6(b_12*A_6 + b_10*A_4 + b_8*A_2) + b_6*A_6 + b_4*A_4 + b_2*A_2 +b_0*I + blas64.Axpy(b[12], a6vec, vvec) + blas64.Axpy(b[10], a4vec, vvec) + blas64.Axpy(b[8], a2vec, vvec) + v.Mul(v, a6) + blas64.Axpy(b[6], a6vec, vvec) + blas64.Axpy(b[4], a4vec, vvec) + blas64.Axpy(b[2], a2vec, vvec) + blas64.Axpy(b[0], ivec, vvec) + + // U = A(A_6(b_13*A_6 + b_11*A_4 + b_9*A_2) + b_7*A_6 + b_5*A_4 + b_2*A_3 +b_1*I) + blas64.Axpy(b[13], a6vec, uvec) + blas64.Axpy(b[11], a4vec, uvec) + blas64.Axpy(b[9], a2vec, uvec) + u.Mul(u, a6) + blas64.Axpy(b[7], a6vec, uvec) + blas64.Axpy(b[5], a4vec, uvec) + blas64.Axpy(b[3], a2vec, uvec) + blas64.Axpy(b[1], ivec, uvec) + u.Mul(u, a1) + + // Use i as a workspace here and + // rename u for the second call's + // receiver. + vmu, vpu := u, i + vpu.Add(v, u) + vmu.Sub(v, u) + + m.Solve(vmu, vpu) + + for ; s > 0; s-- { + m.Mul(m, m) + } +} + +// Pow calculates the integral power of the matrix a to n, placing the result +// in the receiver. Pow will panic if n is negative or if a is not square. +func (m *Dense) Pow(a Matrix, n int) { + if n < 0 { + panic("matrix: illegal power") + } + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + + m.reuseAs(r, c) + + // Take possible fast paths. + switch n { + case 0: + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + m.mat.Data[i*m.mat.Stride+i] = 1 + } + return + case 1: + m.Copy(a) + return + case 2: + m.Mul(a, a) + return + } + + // Perform iterative exponentiation by squaring in work space. + w := getWorkspace(r, r, false) + w.Copy(a) + s := getWorkspace(r, r, false) + s.Copy(a) + x := getWorkspace(r, r, false) + for n--; n > 0; n >>= 1 { + if n&1 != 0 { + x.Mul(w, s) + w, x = x, w + } + if n != 1 { + x.Mul(s, s) + s, x = x, s + } + } + m.Copy(w) + putWorkspace(w) + putWorkspace(s) + putWorkspace(x) +} + +// Scale multiplies the elements of a by f, placing the result in the receiver. +// +// See the Scaler interface for more information. 
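
A minimal usage sketch of the matrix exponential, integer power and scaling routines documented above; it is not part of the vendored source, assumes the vendored import path gonum.org/v1/gonum/mat plus the standard fmt package, and all names are illustrative:

func exampleExpPowScale() {
	// a is a small 2×2 permutation matrix, so a*a is the identity.
	a := mat.NewDense(2, 2, []float64{
		0, 1,
		1, 0,
	})

	var ea mat.Dense
	ea.Exp(a) // ea = e^a via the Pade-based algorithm above

	var a3 mat.Dense
	a3.Pow(a, 3) // a3 = a*a*a by exponentiation by squaring

	var half mat.Dense
	half.Scale(0.5, a) // half(i,j) = 0.5 * a(i,j)

	fmt.Println(ea.At(0, 1), a3.At(0, 1), half.At(0, 1))
}
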
+func (m *Dense) Scale(f float64, a Matrix) { + ar, ac := a.Dims() + + m.reuseAs(ar, ac) + + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + amat := rm.RawMatrix() + if m == aU || m.checkOverlap(amat) { + var restore func() + m, restore = m.isolatedWorkspace(a) + defer restore() + } + if !aTrans { + for ja, jm := 0, 0; ja < ar*amat.Stride; ja, jm = ja+amat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v * f + } + } + } else { + for ja, jm := 0, 0; ja < ac*amat.Stride; ja, jm = ja+amat.Stride, jm+1 { + for i, v := range amat.Data[ja : ja+ar] { + m.mat.Data[i*m.mat.Stride+jm] = v * f + } + } + } + return + } + + m.checkOverlapMatrix(a) + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, f*a.At(r, c)) + } + } +} + +// Apply applies the function fn to each of the elements of a, placing the +// resulting matrix in the receiver. The function fn takes a row/column +// index and element value and returns some function of that tuple. +func (m *Dense) Apply(fn func(i, j int, v float64) float64, a Matrix) { + ar, ac := a.Dims() + + m.reuseAs(ar, ac) + + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + amat := rm.RawMatrix() + if m == aU || m.checkOverlap(amat) { + var restore func() + m, restore = m.isolatedWorkspace(a) + defer restore() + } + if !aTrans { + for j, ja, jm := 0, 0, 0; ja < ar*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = fn(j, i, v) + } + } + } else { + for j, ja, jm := 0, 0, 0; ja < ac*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+1 { + for i, v := range amat.Data[ja : ja+ar] { + m.mat.Data[i*m.mat.Stride+jm] = fn(i, j, v) + } + } + } + return + } + + m.checkOverlapMatrix(a) + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, fn(r, c, a.At(r, c))) + } + } +} + +// RankOne performs a rank-one update to the matrix a and stores the result +// in the receiver. If a is zero, see Outer. +// m = a + alpha * x * y' +func (m *Dense) RankOne(a Matrix, alpha float64, x, y Vector) { + ar, ac := a.Dims() + xr, xc := x.Dims() + if xr != ar || xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yr != ac || yc != 1 { + panic(ErrShape) + } + + if a != m { + aU, _ := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m.checkOverlap(rm.RawMatrix()) + } + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + m.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + m.checkOverlap((&VecDense{mat: ymat}).asGeneral()) + } else { + fast = false + } + + if fast { + if m != a { + m.reuseAs(ar, ac) + m.Copy(a) + } + blas64.Ger(alpha, xmat, ymat, m.mat) + return + } + + m.reuseAs(ar, ac) + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + m.set(i, j, a.At(i, j)+alpha*x.AtVec(i)*y.AtVec(j)) + } + } +} + +// Outer calculates the outer product of the column vectors x and y, +// and stores the result in the receiver. +// m = alpha * x * y' +// In order to update an existing matrix, see RankOne. 
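
The element-wise Apply, the rank-one update and the outer product just documented share the same receiver-based pattern. The sketch below is illustrative only, assuming gonum.org/v1/gonum/mat with its NewVecDense constructor and the fmt package:

func exampleApplyRankOneOuter() {
	a := mat.NewDense(2, 3, []float64{
		1, 2, 3,
		4, 5, 6,
	})
	x := mat.NewVecDense(2, []float64{1, 1})
	y := mat.NewVecDense(3, []float64{1, 0, -1})

	var sq mat.Dense
	sq.Apply(func(i, j int, v float64) float64 { return v * v }, a) // square each element of a

	var upd mat.Dense
	upd.RankOne(a, 2, x, y) // upd = a + 2 * x * y'

	var out mat.Dense
	out.Outer(2, x, y) // out = 2 * x * y'

	fmt.Println(sq.At(1, 2), upd.At(0, 2), out.At(0, 0))
}
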
+func (m *Dense) Outer(alpha float64, x, y Vector) { + xr, xc := x.Dims() + if xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yc != 1 { + panic(ErrShape) + } + + r := xr + c := yr + + // Copied from reuseAs with use replaced by useZeroed + // and a final zero of the matrix elements if we pass + // the shape checks. + // TODO(kortschak): Factor out into reuseZeroedAs if + // we find another case that needs it. + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if m.IsZero() { + m.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useZeroed(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + } else if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + m.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + m.checkOverlap((&VecDense{mat: ymat}).asGeneral()) + } else { + fast = false + } + + if fast { + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + } + blas64.Ger(alpha, xmat, ymat, m.mat) + return + } + + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, alpha*x.AtVec(i)*y.AtVec(j)) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/diagonal.go b/vendor/gonum.org/v1/gonum/mat/diagonal.go new file mode 100644 index 00000000..e9f074a7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/diagonal.go @@ -0,0 +1,311 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + diagDense *DiagDense + _ Matrix = diagDense + _ Diagonal = diagDense + _ MutableDiagonal = diagDense + _ Triangular = diagDense + _ TriBanded = diagDense + _ Symmetric = diagDense + _ SymBanded = diagDense + _ Banded = diagDense + _ RawBander = diagDense + _ RawSymBander = diagDense + + diag Diagonal + _ Matrix = diag + _ Diagonal = diag + _ Triangular = diag + _ TriBanded = diag + _ Symmetric = diag + _ SymBanded = diag + _ Banded = diag +) + +// Diagonal represents a diagonal matrix, that is a square matrix that only +// has non-zero terms on the diagonal. +type Diagonal interface { + Matrix + // Diag returns the number of rows/columns in the matrix. + Diag() int + + // Bandwidth and TBand are included in the Diagonal interface + // to allow the use of Diagonal types in banded functions. + // Bandwidth will always return (0, 0). + Bandwidth() (kl, ku int) + TBand() Banded + + // Triangle and TTri are included in the Diagonal interface + // to allow the use of Diagonal types in triangular functions. + Triangle() (int, TriKind) + TTri() Triangular + + // Symmetric and SymBand are included in the Diagonal interface + // to allow the use of Diagonal types in symmetric and banded symmetric + // functions respectively. + Symmetric() int + SymBand() (n, k int) + + // TriBand and TTriBand are included in the Diagonal interface + // to allow the use of Diagonal types in triangular banded functions. + TriBand() (n, k int, kind TriKind) + TTriBand() TriBanded +} + +// MutableDiagonal is a Diagonal matrix whose elements can be set. 
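
Because the Diagonal interface above embeds the banded, triangular and symmetric behaviours, a concrete diagonal matrix can be passed wherever those interfaces are expected. A hedged sketch, assuming gonum.org/v1/gonum/mat and fmt, with EigenSym used only as one example of a Symmetric consumer:

func exampleDiagonalInterfaces() {
	d := mat.NewDiagDense(3, []float64{1, 2, 3})

	kl, ku := d.Bandwidth() // always (0, 0) for a diagonal matrix
	n, kind := d.Triangle() // n = 3, kind = mat.Upper
	sn := d.Symmetric()     // sn = 3

	// *DiagDense satisfies Symmetric, so it can feed a symmetric
	// eigendecomposition directly.
	var es mat.EigenSym
	ok := es.Factorize(d, false)

	fmt.Println(kl, ku, n, kind == mat.Upper, sn, ok)
}
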
+type MutableDiagonal interface { + Diagonal + SetDiag(i int, v float64) +} + +// DiagDense represents a diagonal matrix in dense storage format. +type DiagDense struct { + mat blas64.Vector +} + +// NewDiagDense creates a new Diagonal matrix with n rows and n columns. +// The length of data must be n or data must be nil, otherwise NewDiagDense +// will panic. NewDiagDense will panic if n is zero. +func NewDiagDense(n int, data []float64) *DiagDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data == nil { + data = make([]float64, n) + } + if len(data) != n { + panic(ErrShape) + } + return &DiagDense{ + mat: blas64.Vector{N: n, Data: data, Inc: 1}, + } +} + +// Diag returns the dimension of the receiver. +func (d *DiagDense) Diag() int { + return d.mat.N +} + +// Dims returns the dimensions of the matrix. +func (d *DiagDense) Dims() (r, c int) { + return d.mat.N, d.mat.N +} + +// T returns the transpose of the matrix. +func (d *DiagDense) T() Matrix { + return d +} + +// TTri returns the transpose of the matrix. Note that Diagonal matrices are +// Upper by default. +func (d *DiagDense) TTri() Triangular { + return TransposeTri{d} +} + +// TBand performs an implicit transpose by returning the receiver inside a +// TransposeBand. +func (d *DiagDense) TBand() Banded { + return TransposeBand{d} +} + +// TTriBand performs an implicit transpose by returning the receiver inside a +// TransposeTriBand. Note that Diagonal matrices are Upper by default. +func (d *DiagDense) TTriBand() TriBanded { + return TransposeTriBand{d} +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +// These values are always zero for diagonal matrices. +func (d *DiagDense) Bandwidth() (kl, ku int) { + return 0, 0 +} + +// Symmetric implements the Symmetric interface. +func (d *DiagDense) Symmetric() int { + return d.mat.N +} + +// SymBand returns the number of rows/columns in the matrix, and the size of +// the bandwidth. +func (d *DiagDense) SymBand() (n, k int) { + return d.mat.N, 0 +} + +// Triangle implements the Triangular interface. +func (d *DiagDense) Triangle() (int, TriKind) { + return d.mat.N, Upper +} + +// TriBand returns the number of rows/columns in the matrix, the +// size of the bandwidth, and the orientation. Note that Diagonal matrices are +// Upper by default. +func (d *DiagDense) TriBand() (n, k int, kind TriKind) { + return d.mat.N, 0, Upper +} + +// Reset zeros the length of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (d *DiagDense) Reset() { + // No change of Inc or n to 0 may be + // made unless both are set to 0. + d.mat.Inc = 0 + d.mat.N = 0 + d.mat.Data = d.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (d *DiagDense) Zero() { + for i := 0; i < d.mat.N; i++ { + d.mat.Data[d.mat.Inc*i] = 0 + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (d *DiagDense) DiagView() Diagonal { + return d +} + +// DiagFrom copies the diagonal of m into the receiver. The receiver must +// be min(r, c) long or zero. Otherwise DiagFrom will panic. 
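
A short sketch of constructing and mutating a DiagDense, and of DiagFrom as documented above and implemented below; not part of the vendored source, assuming gonum.org/v1/gonum/mat and fmt:

func exampleDiagDense() {
	d := mat.NewDiagDense(3, []float64{1, 2, 3})
	d.SetDiag(1, 20) // d now represents diag(1, 20, 3)

	a := mat.NewDense(3, 3, []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 9,
	})
	var da mat.DiagDense
	da.DiagFrom(a) // copies the diagonal 1, 5, 9 of a

	fmt.Println(d.At(1, 1), da.At(2, 2))
}
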
+func (d *DiagDense) DiagFrom(m Matrix) { + n := min(m.Dims()) + d.reuseAs(n) + + var vec blas64.Vector + switch r := m.(type) { + case *DiagDense: + vec = r.mat + case RawBander: + mat := r.RawBand() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride, + Data: mat.Data[mat.KL : (n-1)*mat.Stride+mat.KL+1], + } + case RawMatrixer: + mat := r.RawMatrix() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride + 1, + Data: mat.Data[:(n-1)*mat.Stride+n], + } + case RawSymBander: + mat := r.RawSymBand() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride, + Data: mat.Data[:(n-1)*mat.Stride+1], + } + case RawSymmetricer: + mat := r.RawSymmetric() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride + 1, + Data: mat.Data[:(n-1)*mat.Stride+n], + } + case RawTriBander: + mat := r.RawTriBand() + data := mat.Data + if mat.Uplo == blas.Lower { + data = data[mat.K:] + } + vec = blas64.Vector{ + N: n, + Inc: mat.Stride, + Data: data[:(n-1)*mat.Stride+1], + } + case RawTriangular: + mat := r.RawTriangular() + if mat.Diag == blas.Unit { + for i := 0; i < n; i += d.mat.Inc { + d.mat.Data[i] = 1 + } + return + } + vec = blas64.Vector{ + N: n, + Inc: mat.Stride + 1, + Data: mat.Data[:(n-1)*mat.Stride+n], + } + case RawVectorer: + d.mat.Data[0] = r.RawVector().Data[0] + return + default: + for i := 0; i < n; i++ { + d.setDiag(i, m.At(i, i)) + } + return + } + blas64.Copy(vec, d.mat) +} + +// RawBand returns the underlying data used by the receiver represented +// as a blas64.Band. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.Band. +func (d *DiagDense) RawBand() blas64.Band { + return blas64.Band{ + Rows: d.mat.N, + Cols: d.mat.N, + KL: 0, + KU: 0, + Stride: d.mat.Inc, + Data: d.mat.Data, + } +} + +// RawSymBand returns the underlying data used by the receiver represented +// as a blas64.SymmetricBand. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.Band. +func (d *DiagDense) RawSymBand() blas64.SymmetricBand { + return blas64.SymmetricBand{ + N: d.mat.N, + K: 0, + Stride: d.mat.Inc, + Uplo: blas.Upper, + Data: d.mat.Data, + } +} + +// reuseAs resizes an empty diagonal to a r×r diagonal, +// or checks that a non-empty matrix is r×r. +func (d *DiagDense) reuseAs(r int) { + if r == 0 { + panic(ErrZeroLength) + } + if d.IsZero() { + d.mat = blas64.Vector{ + Inc: 1, + Data: use(d.mat.Data, r), + } + d.mat.N = r + return + } + if r != d.mat.N { + panic(ErrShape) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized vectors can be the +// receiver for size-restricted operations. DiagDenses can be zeroed using Reset. +func (d *DiagDense) IsZero() bool { + // It must be the case that d.Dims() returns + // zeros in this case. See comment in Reset(). + return d.mat.Inc == 0 +} diff --git a/vendor/gonum.org/v1/gonum/mat/doc.go b/vendor/gonum.org/v1/gonum/mat/doc.go new file mode 100644 index 00000000..2cc91001 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/doc.go @@ -0,0 +1,169 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mat provides implementations of float64 and complex128 matrix +// structures and linear algebra operations on them. +// +// Overview +// +// This section provides a quick overview of the mat package. The following +// sections provide more in depth commentary. 
+// +// mat provides: +// - Interfaces for Matrix classes (Matrix, Symmetric, Triangular) +// - Concrete implementations (Dense, SymDense, TriDense) +// - Methods and functions for using matrix data (Add, Trace, SymRankOne) +// - Types for constructing and using matrix factorizations (QR, LU) +// - The complementary types for complex matrices, CMatrix, CSymDense, etc. +// +// A matrix may be constructed through the corresponding New function. If no +// backing array is provided the matrix will be initialized to all zeros. +// // Allocate a zeroed real matrix of size 3×5 +// zero := mat.NewDense(3, 5, nil) +// If a backing data slice is provided, the matrix will have those elements. +// Matrices are all stored in row-major format. +// // Generate a 6×6 matrix of random values. +// data := make([]float64, 36) +// for i := range data { +// data[i] = rand.NormFloat64() +// } +// a := mat.NewDense(6, 6, data) +// Operations involving matrix data are implemented as functions when the values +// of the matrix remain unchanged +// tr := mat.Trace(a) +// and are implemented as methods when the operation modifies the receiver. +// zero.Copy(a) +// +// Receivers must be the correct size for the matrix operations, otherwise the +// operation will panic. As a special case for convenience, a zero-value matrix +// will be modified to have the correct size, allocating data if necessary. +// var c mat.Dense // construct a new zero-sized matrix +// c.Mul(a, a) // c is automatically adjusted to be 6×6 +// +// Zero-value of a matrix +// +// A zero-value matrix is either the Go language definition of a zero-value or +// is a zero-sized matrix with zero-length stride. Matrix implementations may have +// a Reset method to revert the receiver into a zero-valued matrix and an IsZero +// method that returns whether the matrix is zero-valued. +// So the following will all result in a zero-value matrix. +// - var a mat.Dense +// - a := NewDense(0, 0, make([]float64, 0, 100)) +// - a.Reset() +// A zero-value matrix can not be sliced even if it does have an adequately sized +// backing data slice, but can be expanded using its Grow method if it exists. +// +// The Matrix Interfaces +// +// The Matrix interface is the common link between the concrete types of real +// matrices, The Matrix interface is defined by three functions: Dims, which +// returns the dimensions of the Matrix, At, which returns the element in the +// specified location, and T for returning a Transpose (discussed later). All of +// the concrete types can perform these behaviors and so implement the interface. +// Methods and functions are designed to use this interface, so in particular the method +// func (m *Dense) Mul(a, b Matrix) +// constructs a *Dense from the result of a multiplication with any Matrix types, +// not just *Dense. Where more restrictive requirements must be met, there are also the +// Symmetric and Triangular interfaces. For example, in +// func (s *SymDense) AddSym(a, b Symmetric) +// the Symmetric interface guarantees a symmetric result. +// +// The CMatrix interface plays the same role for complex matrices. The difference +// is that the CMatrix type has the H method instead T, for returning the conjugate +// transpose. +// +// (Conjugate) Transposes +// +// The T method is used for transposition on real matrices, and H is used for +// conjugate transposition on complex matrices. For example, c.Mul(a.T(), b) computes +// c = a^T * b. 
The mat types implement this method implicitly — +// see the Transpose and Conjugate types for more details. Note that some +// operations have a transpose as part of their definition, as in *SymDense.SymOuterK. +// +// Matrix Factorization +// +// Matrix factorizations, such as the LU decomposition, typically have their own +// specific data storage, and so are each implemented as a specific type. The +// factorization can be computed through a call to Factorize +// var lu mat.LU +// lu.Factorize(a) +// The elements of the factorization can be extracted through methods on the +// factorized type, i.e. *LU.UTo. The factorization types can also be used directly, +// as in *Dense.SolveCholesky. Some factorizations can be updated directly, +// without needing to update the original matrix and refactorize, +// as in *LU.RankOne. +// +// BLAS and LAPACK +// +// BLAS and LAPACK are the standard APIs for linear algebra routines. Many +// operations in mat are implemented using calls to the wrapper functions +// in gonum/blas/blas64 and gonum/lapack/lapack64 and their complex equivalents. +// By default, blas64 and lapack64 call the native Go implementations of the +// routines. Alternatively, it is possible to use C-based implementations of the +// APIs through the respective cgo packages and "Use" functions. The Go +// implementation of LAPACK (used by default) makes calls +// through blas64, so if a cgo BLAS implementation is registered, the lapack64 +// calls will be partially executed in Go and partially executed in C. +// +// Type Switching +// +// The Matrix abstraction enables efficiency as well as interoperability. Go's +// type reflection capabilities are used to choose the most efficient routine +// given the specific concrete types. For example, in +// c.Mul(a, b) +// if a and b both implement RawMatrixer, that is, they can be represented as a +// blas64.General, blas64.Gemm (general matrix multiplication) is called, while +// instead if b is a RawSymmetricer blas64.Symm is used (general-symmetric +// multiplication), and if b is a *VecDense blas64.Gemv is used. +// +// There are many possible type combinations and special cases. No specific guarantees +// are made about the performance of any method, and in particular, note that an +// abstract matrix type may be copied into a concrete type of the corresponding +// value. If there are specific special cases that are needed, please submit a +// pull-request or file an issue. +// +// Invariants +// +// Matrix input arguments to functions are never directly modified. If an operation +// changes Matrix data, the mutated matrix will be the receiver of a function. +// +// For convenience, a matrix may be used as both a receiver and as an input, e.g. +// a.Pow(a, 6) +// v.SolveVec(a.T(), v) +// though in many cases this will cause an allocation (see Element Aliasing). +// An exception to this rule is Copy, which does not allow a.Copy(a.T()). +// +// Element Aliasing +// +// Most methods in mat modify receiver data. It is forbidden for the modified +// data region of the receiver to overlap the used data area of the input +// arguments. The exception to this rule is when the method receiver is equal to one +// of the input arguments, as in the a.Pow(a, 6) call above, or its implicit transpose. +// +// This prohibition is to help avoid subtle mistakes when the method needs to read +// from and write to the same data region. There are ways to make mistakes using the +// mat API, and mat functions will detect and complain about those. 
+// There are many ways to make mistakes by excursion from the mat API via +// interaction with raw matrix values. +// +// If you need to read the rest of this section to understand the behavior of +// your program, you are being clever. Don't be clever. If you must be clever, +// blas64 and lapack64 may be used to call the behavior directly. +// +// mat will use the following rules to detect overlap between the receiver and one +// of the inputs: +// - the input implements one of the Raw methods, and +// - the address ranges of the backing data slices overlap, and +// - the strides differ or there is an overlap in the used data elements. +// If such an overlap is detected, the method will panic. +// +// The following cases will not panic: +// - the data slices do not overlap, +// - there is pointer identity between the receiver and input values after +// the value has been untransposed if necessary. +// +// mat will not attempt to detect element overlap if the input does not implement a +// Raw method. Method behavior is undefined if there is undetected overlap. +// +package mat // import "gonum.org/v1/gonum/mat" diff --git a/vendor/gonum.org/v1/gonum/mat/eigen.go b/vendor/gonum.org/v1/gonum/mat/eigen.go new file mode 100644 index 00000000..ee971e4a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/eigen.go @@ -0,0 +1,350 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badFact = "mat: use without successful factorization" + badNoVect = "mat: eigenvectors not computed" +) + +// EigenSym is a type for creating and manipulating the Eigen decomposition of +// symmetric matrices. +type EigenSym struct { + vectorsComputed bool + + values []float64 + vectors *Dense +} + +// Factorize computes the eigenvalue decomposition of the symmetric matrix a. +// The Eigen decomposition is defined as +// A = P * D * P^-1 +// where D is a diagonal matrix containing the eigenvalues of the matrix, and +// P is a matrix of the eigenvectors of A. Factorize computes the eigenvalues +// in ascending order. If the vectors input argument is false, the eigenvectors +// are not computed. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, methods that require a successful factorization will panic. +func (e *EigenSym) Factorize(a Symmetric, vectors bool) (ok bool) { + // kill previous decomposition + e.vectorsComputed = false + e.values = e.values[:] + + n := a.Symmetric() + sd := NewSymDense(n, nil) + sd.CopySym(a) + + jobz := lapack.EVNone + if vectors { + jobz = lapack.EVCompute + } + w := make([]float64, n) + work := []float64{0} + lapack64.Syev(jobz, sd.mat, w, work, -1) + + work = getFloats(int(work[0]), false) + ok = lapack64.Syev(jobz, sd.mat, w, work, len(work)) + putFloats(work) + if !ok { + e.vectorsComputed = false + e.values = nil + e.vectors = nil + return false + } + e.vectorsComputed = vectors + e.values = w + e.vectors = NewDense(n, n, sd.mat.Data) + return true +} + +// succFact returns whether the receiver contains a successful factorization. +func (e *EigenSym) succFact() bool { + return len(e.values) != 0 +} + +// Values extracts the eigenvalues of the factorized matrix. If dst is +// non-nil, the values are stored in-place into dst. In this case +// dst must have length n, otherwise Values will panic. 
If dst is +// nil, then a new slice will be allocated of the proper length and filled +// with the eigenvalues. +// +// Values panics if the Eigen decomposition was not successful. +func (e *EigenSym) Values(dst []float64) []float64 { + if !e.succFact() { + panic(badFact) + } + if dst == nil { + dst = make([]float64, len(e.values)) + } + if len(dst) != len(e.values) { + panic(ErrSliceLengthMismatch) + } + copy(dst, e.values) + return dst +} + +// VectorsTo returns the eigenvectors of the decomposition. VectorsTo +// will panic if the eigenvectors were not computed during the factorization, +// or if the factorization was not successful. +// +// If dst is not nil, the eigenvectors are stored in-place into dst, and dst +// must have size n×n and panics otherwise. If dst is nil, a new matrix +// is allocated and returned. +func (e *EigenSym) VectorsTo(dst *Dense) *Dense { + if !e.succFact() { + panic(badFact) + } + if !e.vectorsComputed { + panic(badNoVect) + } + r, c := e.vectors.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + dst.Copy(e.vectors) + return dst +} + +// EigenKind specifies the computation of eigenvectors during factorization. +type EigenKind int + +const ( + // EigenNone specifies to not compute any eigenvectors. + EigenNone EigenKind = 0 + // EigenLeft specifies to compute the left eigenvectors. + EigenLeft EigenKind = 1 << iota + // EigenRight specifies to compute the right eigenvectors. + EigenRight + // EigenBoth is a convenience value for computing both eigenvectors. + EigenBoth EigenKind = EigenLeft | EigenRight +) + +// Eigen is a type for creating and using the eigenvalue decomposition of a dense matrix. +type Eigen struct { + n int // The size of the factorized matrix. + + kind EigenKind + + values []complex128 + rVectors *CDense + lVectors *CDense +} + +// succFact returns whether the receiver contains a successful factorization. +func (e *Eigen) succFact() bool { + return e.n != 0 +} + +// Factorize computes the eigenvalues of the square matrix a, and optionally +// the eigenvectors. +// +// A right eigenvalue/eigenvector combination is defined by +// A * x_r = λ * x_r +// where x_r is the column vector called an eigenvector, and λ is the corresponding +// eigenvalue. +// +// Similarly, a left eigenvalue/eigenvector combination is defined by +// x_l * A = λ * x_l +// The eigenvalues, but not the eigenvectors, are the same for both decompositions. +// +// Typically eigenvectors refer to right eigenvectors. +// +// In all cases, Factorize computes the eigenvalues of the matrix. kind +// specifies which of the eigenvectors, if any, to compute. See the EigenKind +// documentation for more information. +// Eigen panics if the input matrix is not square. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, methods that require a successful factorization will panic. +func (e *Eigen) Factorize(a Matrix, kind EigenKind) (ok bool) { + // kill previous factorization. + e.n = 0 + e.kind = 0 + // Copy a because it is modified during the Lapack call. 
+ r, c := a.Dims() + if r != c { + panic(ErrShape) + } + var sd Dense + sd.Clone(a) + + left := kind&EigenLeft != 0 + right := kind&EigenRight != 0 + + var vl, vr Dense + jobvl := lapack.LeftEVNone + jobvr := lapack.RightEVNone + if left { + vl = *NewDense(r, r, nil) + jobvl = lapack.LeftEVCompute + } + if right { + vr = *NewDense(c, c, nil) + jobvr = lapack.RightEVCompute + } + + wr := getFloats(c, false) + defer putFloats(wr) + wi := getFloats(c, false) + defer putFloats(wi) + + work := []float64{0} + lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, -1) + work = getFloats(int(work[0]), false) + first := lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, len(work)) + putFloats(work) + + if first != 0 { + e.values = nil + return false + } + e.n = r + e.kind = kind + + // Construct complex eigenvalues from float64 data. + values := make([]complex128, r) + for i, v := range wr { + values[i] = complex(v, wi[i]) + } + e.values = values + + // Construct complex eigenvectors from float64 data. + var cvl, cvr CDense + if left { + cvl = *NewCDense(r, r, nil) + e.complexEigenTo(&cvl, &vl) + e.lVectors = &cvl + } else { + e.lVectors = nil + } + if right { + cvr = *NewCDense(c, c, nil) + e.complexEigenTo(&cvr, &vr) + e.rVectors = &cvr + } else { + e.rVectors = nil + } + return true +} + +// Kind returns the EigenKind of the decomposition. If no decomposition has been +// computed, Kind returns -1. +func (e *Eigen) Kind() EigenKind { + if !e.succFact() { + return -1 + } + return e.kind +} + +// Values extracts the eigenvalues of the factorized matrix. If dst is +// non-nil, the values are stored in-place into dst. In this case +// dst must have length n, otherwise Values will panic. If dst is +// nil, then a new slice will be allocated of the proper length and +// filed with the eigenvalues. +// +// Values panics if the Eigen decomposition was not successful. +func (e *Eigen) Values(dst []complex128) []complex128 { + if !e.succFact() { + panic(badFact) + } + if dst == nil { + dst = make([]complex128, e.n) + } + if len(dst) != e.n { + panic(ErrSliceLengthMismatch) + } + copy(dst, e.values) + return dst +} + +// complexEigenTo extracts the complex eigenvectors from the real matrix d +// and stores them into the complex matrix dst. +// +// The columns of the returned n×n dense matrix contain the eigenvectors of the +// decomposition in the same order as the eigenvalues. +// If the j-th eigenvalue is real, then +// dst[:,j] = d[:,j], +// and if it is not real, then the elements of the j-th and (j+1)-th columns of d +// form complex conjugate pairs and the eigenvectors are recovered as +// dst[:,j] = d[:,j] + i*d[:,j+1], +// dst[:,j+1] = d[:,j] - i*d[:,j+1], +// where i is the imaginary unit. +func (e *Eigen) complexEigenTo(dst *CDense, d *Dense) { + r, c := d.Dims() + cr, cc := dst.Dims() + if r != cr { + panic("size mismatch") + } + if c != cc { + panic("size mismatch") + } + for j := 0; j < c; j++ { + if imag(e.values[j]) == 0 { + for i := 0; i < r; i++ { + dst.set(i, j, complex(d.at(i, j), 0)) + } + continue + } + for i := 0; i < r; i++ { + real := d.at(i, j) + imag := d.at(i, j+1) + dst.set(i, j, complex(real, imag)) + dst.set(i, j+1, complex(real, -imag)) + } + j++ + } +} + +// VectorsTo returns the right eigenvectors of the decomposition. VectorsTo +// will panic if the right eigenvectors were not computed during the factorization, +// or if the factorization was not successful. 
+// +// The computed eigenvectors are normalized to have Euclidean norm equal to 1 +// and largest component real. +func (e *Eigen) VectorsTo(dst *CDense) *CDense { + if !e.succFact() { + panic(badFact) + } + if e.kind&EigenRight == 0 { + panic(badNoVect) + } + if dst == nil { + dst = NewCDense(e.n, e.n, nil) + } else { + dst.reuseAs(e.n, e.n) + } + dst.Copy(e.rVectors) + return dst +} + +// LeftVectorsTo returns the left eigenvectors of the decomposition. LeftVectorsTo +// will panic if the left eigenvectors were not computed during the factorization, +// or if the factorization was not successful. +// +// The computed eigenvectors are normalized to have Euclidean norm equal to 1 +// and largest component real. +func (e *Eigen) LeftVectorsTo(dst *CDense) *CDense { + if !e.succFact() { + panic(badFact) + } + if e.kind&EigenLeft == 0 { + panic(badNoVect) + } + if dst == nil { + dst = NewCDense(e.n, e.n, nil) + } else { + dst.reuseAs(e.n, e.n) + } + dst.Copy(e.lVectors) + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/errors.go b/vendor/gonum.org/v1/gonum/mat/errors.go new file mode 100644 index 00000000..0430d126 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/errors.go @@ -0,0 +1,149 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "fmt" + "runtime" + + "gonum.org/v1/gonum/lapack" +) + +// Condition is the condition number of a matrix. The condition +// number is defined as |A| * |A^-1|. +// +// One important use of Condition is during linear solve routines (finding x such +// that A * x = b). The condition number of A indicates the accuracy of +// the computed solution. A Condition error will be returned if the condition +// number of A is sufficiently large. If A is exactly singular to working precision, +// Condition == ∞, and the solve algorithm may have completed early. If Condition +// is large and finite the solve algorithm will be performed, but the computed +// solution may be innacurate. Due to the nature of finite precision arithmetic, +// the value of Condition is only an approximate test of singularity. +type Condition float64 + +func (c Condition) Error() string { + return fmt.Sprintf("matrix singular or near-singular with condition number %.4e", c) +} + +// ConditionTolerance is the tolerance limit of the condition number. If the +// condition number is above this value, the matrix is considered singular. +const ConditionTolerance = 1e16 + +const ( + // CondNorm is the matrix norm used for computing the condition number by routines + // in the matrix packages. + CondNorm = lapack.MaxRowSum + + // CondNormTrans is the norm used to compute on A^T to get the same result as + // computing CondNorm on A. + CondNormTrans = lapack.MaxColumnSum +) + +const stackTraceBufferSize = 1 << 20 + +// Maybe will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. 
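
A minimal sketch of the Maybe wrapper documented above, recovering the ErrShape panic raised by a mismatched multiplication; illustrative only, assuming gonum.org/v1/gonum/mat and fmt:

func exampleMaybe() {
	a := mat.NewDense(2, 3, nil)
	b := mat.NewDense(2, 3, nil) // wrong inner dimension for a*b

	err := mat.Maybe(func() {
		var c mat.Dense
		c.Mul(a, b) // panics with ErrShape; recovered by Maybe
	})
	if err != nil {
		fmt.Println("recovered:", err) // err is an ErrorStack wrapping ErrShape
	}
}
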
+func Maybe(fn func()) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + fn() + return +} + +// MaybeFloat will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. +func MaybeFloat(fn func() float64) (f float64, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + return fn(), nil +} + +// MaybeComplex will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. +func MaybeComplex(fn func() complex128) (f complex128, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + return fn(), nil +} + +// Error represents matrix handling errors. These errors can be recovered by Maybe wrappers. +type Error struct{ string } + +func (err Error) Error() string { return err.string } + +var ( + ErrIndexOutOfRange = Error{"matrix: index out of range"} + ErrRowAccess = Error{"matrix: row index out of range"} + ErrColAccess = Error{"matrix: column index out of range"} + ErrVectorAccess = Error{"matrix: vector index out of range"} + ErrZeroLength = Error{"matrix: zero length in matrix dimension"} + ErrRowLength = Error{"matrix: row length mismatch"} + ErrColLength = Error{"matrix: col length mismatch"} + ErrSquare = Error{"matrix: expect square matrix"} + ErrNormOrder = Error{"matrix: invalid norm order for matrix"} + ErrSingular = Error{"matrix: matrix is singular"} + ErrShape = Error{"matrix: dimension mismatch"} + ErrIllegalStride = Error{"matrix: illegal stride"} + ErrPivot = Error{"matrix: malformed pivot list"} + ErrTriangle = Error{"matrix: triangular storage mismatch"} + ErrTriangleSet = Error{"matrix: triangular set out of bounds"} + ErrBandSet = Error{"matrix: band set out of bounds"} + ErrDiagSet = Error{"matrix: diagonal set out of bounds"} + ErrSliceLengthMismatch = Error{"matrix: input slice length mismatch"} + ErrNotPSD = Error{"matrix: input not positive symmetric definite"} + ErrFailedEigen = Error{"matrix: eigendecomposition not successful"} +) + +// ErrorStack represents matrix handling errors that have been recovered by Maybe wrappers. +type ErrorStack struct { + Err error + + // StackTrace is the stack trace + // recovered by Maybe, MaybeFloat + // or MaybeComplex. 
+ StackTrace string +} + +func (err ErrorStack) Error() string { return err.Err.Error() } diff --git a/vendor/gonum.org/v1/gonum/mat/format.go b/vendor/gonum.org/v1/gonum/mat/format.go new file mode 100644 index 00000000..9b60cb31 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/format.go @@ -0,0 +1,238 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "fmt" + "strconv" +) + +// Formatted returns a fmt.Formatter for the matrix m using the given options. +func Formatted(m Matrix, options ...FormatOption) fmt.Formatter { + f := formatter{ + matrix: m, + dot: '.', + } + for _, o := range options { + o(&f) + } + return f +} + +type formatter struct { + matrix Matrix + prefix string + margin int + dot byte + squeeze bool +} + +// FormatOption is a functional option for matrix formatting. +type FormatOption func(*formatter) + +// Prefix sets the formatted prefix to the string p. Prefix is a string that is prepended to +// each line of output. +func Prefix(p string) FormatOption { + return func(f *formatter) { f.prefix = p } +} + +// Excerpt sets the maximum number of rows and columns to print at the margins of the matrix +// to m. If m is zero or less all elements are printed. +func Excerpt(m int) FormatOption { + return func(f *formatter) { f.margin = m } +} + +// DotByte sets the dot character to b. The dot character is used to replace zero elements +// if the result is printed with the fmt ' ' verb flag. Without a DotByte option, the default +// dot character is '.'. +func DotByte(b byte) FormatOption { + return func(f *formatter) { f.dot = b } +} + +// Squeeze sets the printing behaviour to minimise column width for each individual column. +func Squeeze() FormatOption { + return func(f *formatter) { f.squeeze = true } +} + +// Format satisfies the fmt.Formatter interface. +func (f formatter) Format(fs fmt.State, c rune) { + if c == 'v' && fs.Flag('#') { + fmt.Fprintf(fs, "%#v", f.matrix) + return + } + format(f.matrix, f.prefix, f.margin, f.dot, f.squeeze, fs, c) +} + +// format prints a pretty representation of m to the fs io.Writer. The format character c +// specifies the numerical representation of elements; valid values are those for float64 +// specified in the fmt package, with their associated flags. In addition to this, a space +// preceding a verb indicates that zero values should be represented by the dot character. +// The printed range of the matrix can be limited by specifying a positive value for margin; +// If margin is greater than zero, only the first and last margin rows/columns of the matrix +// are output. If squeeze is true, column widths are determined on a per-column basis. +// +// format will not provide Go syntax output. 
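
A small sketch of the Formatted wrapper and the options defined above, which drive the format function implemented below; illustrative only, assuming gonum.org/v1/gonum/mat and fmt:

func exampleFormatted() {
	a := mat.NewDense(3, 3, []float64{
		1, 0, 2.5,
		0, 3, 0,
		4, 0, 5,
	})

	f := mat.Formatted(a, mat.Prefix("    "), mat.Squeeze())

	fmt.Printf("a = %v\n", f)   // per-column widths from Squeeze
	fmt.Printf("a = % v\n", f)  // ' ' flag: zero elements printed with the dot byte
	fmt.Printf("a = %.2f\n", f) // fixed precision
}
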
+func format(m Matrix, prefix string, margin int, dot byte, squeeze bool, fs fmt.State, c rune) { + rows, cols := m.Dims() + + var printed int + if margin <= 0 { + printed = rows + if cols > printed { + printed = cols + } + } else { + printed = margin + } + + prec, pOk := fs.Precision() + if !pOk { + prec = -1 + } + + var ( + maxWidth int + widths widther + buf, pad []byte + ) + if squeeze { + widths = make(columnWidth, cols) + } else { + widths = new(uniformWidth) + } + switch c { + case 'v', 'e', 'E', 'f', 'F', 'g', 'G': + if c == 'v' { + buf, maxWidth = maxCellWidth(m, 'g', printed, prec, widths) + } else { + buf, maxWidth = maxCellWidth(m, c, printed, prec, widths) + } + default: + fmt.Fprintf(fs, "%%!%c(%T=Dims(%d, %d))", c, m, rows, cols) + return + } + width, _ := fs.Width() + width = max(width, maxWidth) + pad = make([]byte, max(width, 2)) + for i := range pad { + pad[i] = ' ' + } + + first := true + if rows > 2*printed || cols > 2*printed { + first = false + fmt.Fprintf(fs, "Dims(%d, %d)\n", rows, cols) + } + + skipZero := fs.Flag(' ') + for i := 0; i < rows; i++ { + if !first { + fmt.Fprint(fs, prefix) + } + first = false + var el string + switch { + case rows == 1: + fmt.Fprint(fs, "[") + el = "]" + case i == 0: + fmt.Fprint(fs, "⎡") + el = "⎤\n" + case i < rows-1: + fmt.Fprint(fs, "⎢") + el = "⎥\n" + default: + fmt.Fprint(fs, "⎣") + el = "⎦" + } + + for j := 0; j < cols; j++ { + if j >= printed && j < cols-printed { + j = cols - printed - 1 + if i == 0 || i == rows-1 { + fmt.Fprint(fs, "... ... ") + } else { + fmt.Fprint(fs, " ") + } + continue + } + + v := m.At(i, j) + if v == 0 && skipZero { + buf = buf[:1] + buf[0] = dot + } else { + if c == 'v' { + buf = strconv.AppendFloat(buf[:0], v, 'g', prec, 64) + } else { + buf = strconv.AppendFloat(buf[:0], v, byte(c), prec, 64) + } + } + if fs.Flag('-') { + fs.Write(buf) + fs.Write(pad[:widths.width(j)-len(buf)]) + } else { + fs.Write(pad[:widths.width(j)-len(buf)]) + fs.Write(buf) + } + + if j < cols-1 { + fs.Write(pad[:2]) + } + } + + fmt.Fprint(fs, el) + + if i >= printed-1 && i < rows-printed && 2*printed < rows { + i = rows - printed - 1 + fmt.Fprintf(fs, "%s .\n%[1]s .\n%[1]s .\n", prefix) + continue + } + } +} + +func maxCellWidth(m Matrix, c rune, printed, prec int, w widther) ([]byte, int) { + var ( + buf = make([]byte, 0, 64) + rows, cols = m.Dims() + max int + ) + for i := 0; i < rows; i++ { + if i >= printed-1 && i < rows-printed && 2*printed < rows { + i = rows - printed - 1 + continue + } + for j := 0; j < cols; j++ { + if j >= printed && j < cols-printed { + continue + } + + buf = strconv.AppendFloat(buf, m.At(i, j), byte(c), prec, 64) + if len(buf) > max { + max = len(buf) + } + if len(buf) > w.width(j) { + w.setWidth(j, len(buf)) + } + buf = buf[:0] + } + } + return buf, max +} + +type widther interface { + width(i int) int + setWidth(i, w int) +} + +type uniformWidth int + +func (u *uniformWidth) width(_ int) int { return int(*u) } +func (u *uniformWidth) setWidth(_, w int) { *u = uniformWidth(w) } + +type columnWidth []int + +func (c columnWidth) width(i int) int { return c[i] } +func (c columnWidth) setWidth(i, w int) { c[i] = w } diff --git a/vendor/gonum.org/v1/gonum/mat/gsvd.go b/vendor/gonum.org/v1/gonum/mat/gsvd.go new file mode 100644 index 00000000..2de511a9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/gsvd.go @@ -0,0 +1,415 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// GSVDKind specifies the treatment of singular vectors during a GSVD +// factorization. +type GSVDKind int + +const ( + // GSVDNone specifies that no singular vectors should be computed during + // the decomposition. + GSVDNone GSVDKind = 0 + + // GSVDU specifies that the U singular vectors should be computed during + // the decomposition. + GSVDU GSVDKind = 1 << iota + // GSVDV specifies that the V singular vectors should be computed during + // the decomposition. + GSVDV + // GSVDQ specifies that the Q singular vectors should be computed during + // the decomposition. + GSVDQ + + // GSVDAll is a convenience value for computing all of the singular vectors. + GSVDAll = GSVDU | GSVDV | GSVDQ +) + +// GSVD is a type for creating and using the Generalized Singular Value Decomposition +// (GSVD) of a matrix. +// +// The factorization is a linear transformation of the data sets from the given +// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample" +// spaces. +type GSVD struct { + kind GSVDKind + + r, p, c, k, l int + s1, s2 []float64 + a, b, u, v, q blas64.General + + work []float64 + iwork []int +} + +// succFact returns whether the receiver contains a successful factorization. +func (gsvd *GSVD) succFact() bool { + return gsvd.r != 0 +} + +// Factorize computes the generalized singular value decomposition (GSVD) of the input +// the r×c matrix A and the p×c matrix B. The singular values of A and B are computed +// in all cases, while the singular vectors are optionally computed depending on the +// input kind. +// +// The full singular value decomposition (kind == GSVDAll) deconstructs A and B as +// A = U * Σ₁ * [ 0 R ] * Q^T +// +// B = V * Σ₂ * [ 0 R ] * Q^T +// where Σ₁ and Σ₂ are r×(k+l) and p×(k+l) diagonal matrices of singular values, and +// U, V and Q are r×r, p×p and c×c orthogonal matrices of singular vectors. k+l is the +// effective numerical rank of the matrix [ A^T B^T ]^T. +// +// It is frequently not necessary to compute the full GSVD. Computation time and +// storage costs can be reduced using the appropriate kind. Either only the singular +// values can be computed (kind == SVDNone), or in conjunction with specific singular +// vectors (kind bit set according to matrix.GSVDU, matrix.GSVDV and matrix.GSVDQ). +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. 
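
A usage sketch for the GSVD factorization described above; the input values are arbitrary, and the sketch assumes gonum.org/v1/gonum/mat and fmt:

func exampleGSVD() {
	a := mat.NewDense(4, 3, []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 9,
		10, 11, 12,
	})
	b := mat.NewDense(2, 3, []float64{
		1, 0, 0,
		0, 1, 0,
	})

	var gsvd mat.GSVD
	if ok := gsvd.Factorize(a, b, mat.GSVDAll); !ok {
		fmt.Println("GSVD factorization failed")
		return
	}

	u := gsvd.UTo(nil)       // 4×4 orthogonal U
	q := gsvd.QTo(nil)       // 3×3 orthogonal Q
	s1 := gsvd.SigmaATo(nil) // r×(k+l) Σ₁
	zr := gsvd.ZeroRTo(nil)  // (k+l)×c [ 0 R ]

	fmt.Println(mat.Formatted(s1))
	fmt.Println(mat.Formatted(zr))
	ur, uc := u.Dims()
	qr, qc := q.Dims()
	fmt.Println(ur, uc, qr, qc)
}
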
+func (gsvd *GSVD) Factorize(a, b Matrix, kind GSVDKind) (ok bool) { + // kill the previous decomposition + gsvd.r = 0 + gsvd.kind = 0 + + r, c := a.Dims() + gsvd.r, gsvd.c = r, c + p, c := b.Dims() + gsvd.p = p + if gsvd.c != c { + panic(ErrShape) + } + var jobU, jobV, jobQ lapack.GSVDJob + switch { + default: + panic("gsvd: bad input kind") + case kind == GSVDNone: + jobU = lapack.GSVDNone + jobV = lapack.GSVDNone + jobQ = lapack.GSVDNone + case GSVDAll&kind != 0: + if GSVDU&kind != 0 { + jobU = lapack.GSVDU + gsvd.u = blas64.General{ + Rows: r, + Cols: r, + Stride: r, + Data: use(gsvd.u.Data, r*r), + } + } + if GSVDV&kind != 0 { + jobV = lapack.GSVDV + gsvd.v = blas64.General{ + Rows: p, + Cols: p, + Stride: p, + Data: use(gsvd.v.Data, p*p), + } + } + if GSVDQ&kind != 0 { + jobQ = lapack.GSVDQ + gsvd.q = blas64.General{ + Rows: c, + Cols: c, + Stride: c, + Data: use(gsvd.q.Data, c*c), + } + } + } + + // A and B are destroyed on call, so copy the matrices. + aCopy := DenseCopyOf(a) + bCopy := DenseCopyOf(b) + + gsvd.s1 = use(gsvd.s1, c) + gsvd.s2 = use(gsvd.s2, c) + + gsvd.iwork = useInt(gsvd.iwork, c) + + gsvd.work = use(gsvd.work, 1) + lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, -1, gsvd.iwork) + gsvd.work = use(gsvd.work, int(gsvd.work[0])) + gsvd.k, gsvd.l, ok = lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, len(gsvd.work), gsvd.iwork) + if ok { + gsvd.a = aCopy.mat + gsvd.b = bCopy.mat + gsvd.kind = kind + } + return ok +} + +// Kind returns the GSVDKind of the decomposition. If no decomposition has been +// computed, Kind returns -1. +func (gsvd *GSVD) Kind() GSVDKind { + if !gsvd.succFact() { + return -1 + } + return gsvd.kind +} + +// Rank returns the k and l terms of the rank of [ A^T B^T ]^T. +func (gsvd *GSVD) Rank() (k, l int) { + return gsvd.k, gsvd.l +} + +// GeneralizedValues returns the generalized singular values of the factorized matrices. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and GeneralizedValues will +// panic with matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// GeneralizedValues will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) GeneralizedValues(v []float64) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if v == nil { + v = make([]float64, d-k) + } + if len(v) != d-k { + panic(ErrSliceLengthMismatch) + } + floats.DivTo(v, gsvd.s1[k:d], gsvd.s2[k:d]) + return v +} + +// ValuesA returns the singular values of the factorized A matrix. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and ValuesA will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// ValuesA will panic if the receiver does not contain a successful factorization. 
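
Continuing the sketch above, and assuming gsvd already holds a successful factorization, the rank terms and the three value sets described here can be read back as follows (illustrative only):

func exampleGSVDValues(gsvd *mat.GSVD) {
	k, l := gsvd.Rank()               // rank terms of [ A^T B^T ]^T
	gv := gsvd.GeneralizedValues(nil) // ratios s1[i]/s2[i]
	sa := gsvd.ValuesA(nil)           // singular values of A
	sb := gsvd.ValuesB(nil)           // singular values of B
	fmt.Println(k, l, gv, sa, sb)
}
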
+func (gsvd *GSVD) ValuesA(s []float64) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if s == nil { + s = make([]float64, d-k) + } + if len(s) != d-k { + panic(ErrSliceLengthMismatch) + } + copy(s, gsvd.s1[k:min(r, c)]) + return s +} + +// ValuesB returns the singular values of the factorized B matrix. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and ValuesB will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// ValuesB will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) ValuesB(s []float64) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if s == nil { + s = make([]float64, d-k) + } + if len(s) != d-k { + panic(ErrSliceLengthMismatch) + } + copy(s, gsvd.s2[k:d]) + return s +} + +// ZeroRTo extracts the matrix [ 0 R ] from the singular value decomposition, storing +// the result in-place into dst. [ 0 R ] is size (k+l)×c. +// If dst is nil, a new matrix is allocated. The resulting ZeroR matrix is returned. +// +// ZeroRTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) ZeroRTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + l := gsvd.l + h := min(k+l, r) + if dst == nil { + dst = NewDense(k+l, c, nil) + } else { + dst.reuseAsZeroed(k+l, c) + } + a := Dense{ + mat: gsvd.a, + capRows: r, + capCols: c, + } + dst.Slice(0, h, c-k-l, c).(*Dense). + Copy(a.Slice(0, h, c-k-l, c)) + if r < k+l { + b := Dense{ + mat: gsvd.b, + capRows: gsvd.p, + capCols: c, + } + dst.Slice(r, k+l, c+r-k-l, c).(*Dense). + Copy(b.Slice(r-k, l, c+r-k-l, c)) + } + return dst +} + +// SigmaATo extracts the matrix Σ₁ from the singular value decomposition, storing +// the result in-place into dst. Σ₁ is size r×(k+l). +// If dst is nil, a new matrix is allocated. The resulting SigmaA matrix is returned. +// +// SigmaATo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) SigmaATo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + k := gsvd.k + l := gsvd.l + if dst == nil { + dst = NewDense(r, k+l, nil) + } else { + dst.reuseAsZeroed(r, k+l) + } + for i := 0; i < k; i++ { + dst.set(i, i, 1) + } + for i := k; i < min(r, k+l); i++ { + dst.set(i, i, gsvd.s1[i]) + } + return dst +} + +// SigmaBTo extracts the matrix Σ₂ from the singular value decomposition, storing +// the result in-place into dst. Σ₂ is size p×(k+l). +// If dst is nil, a new matrix is allocated. The resulting SigmaB matrix is returned. +// +// SigmaBTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) SigmaBTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + p := gsvd.p + k := gsvd.k + l := gsvd.l + if dst == nil { + dst = NewDense(p, k+l, nil) + } else { + dst.reuseAsZeroed(p, k+l) + } + for i := 0; i < min(l, r-k); i++ { + dst.set(i, i+k, gsvd.s2[k+i]) + } + for i := r - k; i < l; i++ { + dst.set(i, i+k, 1) + } + return dst +} + +// UTo extracts the matrix U from the singular value decomposition, storing +// the result in-place into dst. U is size r×r. +// If dst is nil, a new matrix is allocated. 
The resulting U matrix is returned. +// +// UTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) UTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if gsvd.kind&GSVDU == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.u.Rows + c := gsvd.u.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.u, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} + +// VTo extracts the matrix V from the singular value decomposition, storing +// the result in-place into dst. V is size p×p. +// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. +// +// VTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) VTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if gsvd.kind&GSVDV == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.v.Rows + c := gsvd.v.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.v, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} + +// QTo extracts the matrix Q from the singular value decomposition, storing +// the result in-place into dst. Q is size c×c. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +// +// QTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) QTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if gsvd.kind&GSVDQ == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.q.Rows + c := gsvd.q.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.q, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/hogsvd.go b/vendor/gonum.org/v1/gonum/mat/hogsvd.go new file mode 100644 index 00000000..bd843e6b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/hogsvd.go @@ -0,0 +1,233 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "errors" + + "gonum.org/v1/gonum/blas/blas64" +) + +// HOGSVD is a type for creating and using the Higher Order Generalized Singular Value +// Decomposition (HOGSVD) of a set of matrices. +// +// The factorization is a linear transformation of the data sets from the given +// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample" +// spaces. +type HOGSVD struct { + n int + v *Dense + b []Dense + + err error +} + +// succFact returns whether the receiver contains a successful factorization. +func (gsvd *HOGSVD) succFact() bool { + return gsvd.n != 0 +} + +// Factorize computes the higher order generalized singular value decomposition (HOGSVD) +// of the n input r_i×c column tall matrices in m. HOGSV extends the GSVD case from 2 to n +// input matrices. +// +// M_0 = U_0 * Σ_0 * V^T +// M_1 = U_1 * Σ_1 * V^T +// . +// . +// . +// M_{n-1} = U_{n-1} * Σ_{n-1} * V^T +// +// where U_i are r_i×c matrices of singular vectors, Σ are c×c matrices singular values, and V +// is a c×c matrix of singular vectors. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. 
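
A usage sketch for the HOGSVD factorization described above, using three column-matched, full column rank inputs; illustrative only, assuming gonum.org/v1/gonum/mat and fmt:

func exampleHOGSVD() {
	a := mat.NewDense(4, 2, []float64{1, 2, 3, 4, 5, 6, 7, 8})
	b := mat.NewDense(3, 2, []float64{1, 0, 0, 1, 1, 1})
	c := mat.NewDense(5, 2, []float64{2, 1, 1, 2, 3, 3, 4, 1, 1, 4})

	var gsvd mat.HOGSVD
	if ok := gsvd.Factorize(a, b, c); !ok {
		fmt.Println("HOGSVD failed:", gsvd.Err())
		return
	}

	v := gsvd.VTo(nil)        // shared 2×2 V
	u0 := gsvd.UTo(nil, 0)    // 4×2 U for the first input
	s0 := gsvd.Values(nil, 0) // singular values of the first input
	vr, vc := v.Dims()
	ur, uc := u0.Dims()
	fmt.Println(vr, vc, ur, uc, s0)
}
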
+func (gsvd *HOGSVD) Factorize(m ...Matrix) (ok bool) { + // Factorize performs the HOGSVD factorisation + // essentially as described by Ponnapalli et al. + // https://doi.org/10.1371/journal.pone.0028072 + + if len(m) < 2 { + panic("hogsvd: too few matrices") + } + gsvd.n = 0 + + r, c := m[0].Dims() + a := make([]Cholesky, len(m)) + var ts SymDense + for i, d := range m { + rd, cd := d.Dims() + if rd < cd { + gsvd.err = ErrShape + return false + } + if rd > r { + r = rd + } + if cd != c { + panic(ErrShape) + } + ts.Reset() + ts.SymOuterK(1, d.T()) + ok = a[i].Factorize(&ts) + if !ok { + gsvd.err = errors.New("hogsvd: cholesky decomposition failed") + return false + } + } + + s := getWorkspace(c, c, true) + defer putWorkspace(s) + sij := getWorkspace(c, c, false) + defer putWorkspace(sij) + for i, ai := range a { + for _, aj := range a[i+1:] { + gsvd.err = ai.SolveCholTo(sij, &aj) + if gsvd.err != nil { + return false + } + s.Add(s, sij) + + gsvd.err = aj.SolveCholTo(sij, &ai) + if gsvd.err != nil { + return false + } + s.Add(s, sij) + } + } + s.Scale(1/float64(len(m)*(len(m)-1)), s) + + var eig Eigen + ok = eig.Factorize(s.T(), EigenRight) + if !ok { + gsvd.err = errors.New("hogsvd: eigen decomposition failed") + return false + } + vc := eig.VectorsTo(nil) + // vc is guaranteed to have real eigenvalues. + rc, cc := vc.Dims() + v := NewDense(rc, cc, nil) + for i := 0; i < rc; i++ { + for j := 0; j < cc; j++ { + a := vc.At(i, j) + v.set(i, j, real(a)) + } + } + // Rescale the columns of v by their Frobenius norms. + // Work done in cv is reflected in v. + var cv VecDense + for j := 0; j < c; j++ { + cv.ColViewOf(v, j) + cv.ScaleVec(1/blas64.Nrm2(cv.mat), &cv) + } + + b := make([]Dense, len(m)) + biT := getWorkspace(c, r, false) + defer putWorkspace(biT) + for i, d := range m { + // All calls to reset will leave a zeroed + // matrix with capacity to store the result + // without additional allocation. + biT.Reset() + gsvd.err = biT.Solve(v, d.T()) + if gsvd.err != nil { + return false + } + b[i].Clone(biT.T()) + } + + gsvd.n = len(m) + gsvd.v = v + gsvd.b = b + return true +} + +// Err returns the reason for a factorization failure. +func (gsvd *HOGSVD) Err() error { + return gsvd.err +} + +// Len returns the number of matrices that have been factorized. If Len returns +// zero, the factorization was not successful. +func (gsvd *HOGSVD) Len() int { + return gsvd.n +} + +// UTo extracts the matrix U_n from the singular value decomposition, storing +// the result in-place into dst. U_n is size r×c. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +// +// UTo will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) UTo(dst *Dense, n int) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if n < 0 || gsvd.n <= n { + panic("hogsvd: invalid index") + } + + if dst == nil { + r, c := gsvd.b[n].Dims() + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(gsvd.b[n].Dims()) + } + dst.Copy(&gsvd.b[n]) + var v VecDense + for j, f := range gsvd.Values(nil, n) { + v.ColViewOf(dst, j) + v.ScaleVec(1/f, &v) + } + return dst +} + +// Values returns the nth set of singular values of the factorized system. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length c, and Values will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. 
+// +// Values will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) Values(s []float64, n int) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + if n < 0 || gsvd.n <= n { + panic("hogsvd: invalid index") + } + + _, c := gsvd.b[n].Dims() + if s == nil { + s = make([]float64, c) + } else if len(s) != c { + panic(ErrSliceLengthMismatch) + } + var v VecDense + for j := 0; j < c; j++ { + v.ColViewOf(&gsvd.b[n], j) + s[j] = blas64.Nrm2(v.mat) + } + return s +} + +// VTo extracts the matrix V from the singular value decomposition, storing +// the result in-place into dst. V is size c×c. +// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. +// +// VTo will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) VTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if dst == nil { + r, c := gsvd.v.Dims() + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(gsvd.v.Dims()) + } + dst.Copy(gsvd.v) + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go b/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go new file mode 100644 index 00000000..59815a67 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go @@ -0,0 +1,348 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file must be kept in sync with index_no_bound_checks.go. + +// +build bounds + +package mat + +// At returns the element at row i, column j. +func (m *Dense) At(i, j int) float64 { + return m.at(i, j) +} + +func (m *Dense) at(i, j int) float64 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *Dense) Set(i, j int, v float64) { + m.set(i, j, v) +} + +func (m *Dense) set(i, j int, v float64) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (m *CDense) At(i, j int) complex128 { + return m.at(i, j) +} + +func (m *CDense) at(i, j int) complex128 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *CDense) Set(i, j int, v complex128) { + m.set(i, j, v) +} + +func (m *CDense) set(i, j int, v complex128) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i. +// It panics if i is out of bounds or if j is not zero. +func (v *VecDense) At(i, j int) float64 { + if j != 0 { + panic(ErrColAccess) + } + return v.at(i) +} + +// AtVec returns the element at row i. +// It panics if i is out of bounds. +func (v *VecDense) AtVec(i int) float64 { + return v.at(i) +} + +func (v *VecDense) at(i int) float64 { + if uint(i) >= uint(v.mat.N) { + panic(ErrRowAccess) + } + return v.mat.Data[i*v.mat.Inc] +} + +// SetVec sets the element at row i to the value val. +// It panics if i is out of bounds. 
+func (v *VecDense) SetVec(i int, val float64) { + v.setVec(i, val) +} + +func (v *VecDense) setVec(i int, val float64) { + if uint(i) >= uint(v.mat.N) { + panic(ErrVectorAccess) + } + v.mat.Data[i*v.mat.Inc] = val +} + +// At returns the element at row i and column j. +func (t *SymDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *SymDense) at(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetSym sets the elements at (i,j) and (j,i) to the value v. +func (t *SymDense) SetSym(i, j int, v float64) { + t.set(i, j, v) +} + +func (t *SymDense) set(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (t *TriDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *TriDense) at(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetTri sets the element of the triangular matrix at row i, column j to the value v. +// It panics if the location is outside the appropriate half of the matrix. +func (t *TriDense) SetTri(i, j int, v float64) { + t.set(i, j, v) +} + +func (t *TriDense) set(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (b *BandDense) At(i, j int) float64 { + return b.at(i, j) +} + +func (b *BandDense) at(i, j int) float64 { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + return 0 + } + return b.mat.Data[i*b.mat.Stride+pj] +} + +// SetBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (b *BandDense) SetBand(i, j int, v float64) { + b.set(i, j, v) +} + +func (b *BandDense) set(i, j int, v float64) { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + panic(ErrBandSet) + } + b.mat.Data[i*b.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (s *SymBandDense) At(i, j int) float64 { + return s.at(i, j) +} + +func (s *SymBandDense) at(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + return 0 + } + return s.mat.Data[i*s.mat.Stride+pj] +} + +// SetSymBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. 
+func (s *SymBandDense) SetSymBand(i, j int, v float64) { + s.set(i, j, v) +} + +func (s *SymBandDense) set(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + panic(ErrBandSet) + } + s.mat.Data[i*s.mat.Stride+pj] = v +} + +func (t *TriBandDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *TriBandDense) at(i, j int) float64 { + // TODO(btracey): Support Diag field, see #692. + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + kl, ku := t.mat.K, 0 + if isUpper { + kl, ku = 0, t.mat.K + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + return 0 + } + return t.mat.Data[i*t.mat.Stride+pj] +} + +func (t *TriBandDense) SetTriBand(i, j int, v float64) { + t.setTriBand(i, j, v) +} + +func (t *TriBandDense) setTriBand(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + kl, ku := t.mat.K, 0 + if isUpper { + kl, ku = 0, t.mat.K + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + panic(ErrBandSet) + } + // TODO(btracey): Support Diag field, see #692. + t.mat.Data[i*t.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (d *DiagDense) At(i, j int) float64 { + return d.at(i, j) +} + +func (d *DiagDense) at(i, j int) float64 { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(d.mat.N) { + panic(ErrColAccess) + } + if i != j { + return 0 + } + return d.mat.Data[i*d.mat.Inc] +} + +// SetDiag sets the element at row i, column i to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (d *DiagDense) SetDiag(i int, v float64) { + d.setDiag(i, v) +} + +func (d *DiagDense) setDiag(i int, v float64) { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + d.mat.Data[i*d.mat.Inc] = v +} diff --git a/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go b/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go new file mode 100644 index 00000000..051f8437 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go @@ -0,0 +1,359 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file must be kept in sync with index_bound_checks.go. + +// +build !bounds + +package mat + +// At returns the element at row i, column j. +func (m *Dense) At(i, j int) float64 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.at(i, j) +} + +func (m *Dense) at(i, j int) float64 { + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *Dense) Set(i, j int, v float64) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.set(i, j, v) +} + +func (m *Dense) set(i, j int, v float64) { + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i, column j. 
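index_bound_checks.go and index_no_bound_checks.go carry the same accessors; the `bounds` build tag (for example `go build -tags bounds`) selects which variant performs the range checks, while the contract seen by callers is identical. A short editor-added sketch of that contract follows; the constructors NewSymDense and NewTriDense come from other files of the package.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	d := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})
	d.Set(1, 2, -6)
	fmt.Println(d.At(1, 2)) // -6
	// d.At(2, 0) would panic with mat.ErrRowAccess: row 2 is out of range.

	s := mat.NewSymDense(3, nil)
	s.SetSym(0, 2, 7)       // stores the (0,2)/(2,0) pair once
	fmt.Println(s.At(2, 0)) // 7

	t := mat.NewTriDense(3, mat.Upper, nil)
	t.SetTri(0, 1, 2.5)
	fmt.Println(t.At(1, 0)) // 0: below the diagonal of an upper triangular matrix
	// t.SetTri(1, 0, 1) would panic: the location is outside the stored triangle.
}
```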
+func (m *CDense) At(i, j int) complex128 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.at(i, j) +} + +func (m *CDense) at(i, j int) complex128 { + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *CDense) Set(i, j int, v complex128) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.set(i, j, v) +} + +func (m *CDense) set(i, j int, v complex128) { + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i. +// It panics if i is out of bounds or if j is not zero. +func (v *VecDense) At(i, j int) float64 { + if uint(i) >= uint(v.mat.N) { + panic(ErrRowAccess) + } + if j != 0 { + panic(ErrColAccess) + } + return v.at(i) +} + +// AtVec returns the element at row i. +// It panics if i is out of bounds. +func (v *VecDense) AtVec(i int) float64 { + if uint(i) >= uint(v.mat.N) { + panic(ErrRowAccess) + } + return v.at(i) +} + +func (v *VecDense) at(i int) float64 { + return v.mat.Data[i*v.mat.Inc] +} + +// SetVec sets the element at row i to the value val. +// It panics if i is out of bounds. +func (v *VecDense) SetVec(i int, val float64) { + if uint(i) >= uint(v.mat.N) { + panic(ErrVectorAccess) + } + v.setVec(i, val) +} + +func (v *VecDense) setVec(i int, val float64) { + v.mat.Data[i*v.mat.Inc] = val +} + +// At returns the element at row i and column j. +func (s *SymDense) At(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + return s.at(i, j) +} + +func (s *SymDense) at(i, j int) float64 { + if i > j { + i, j = j, i + } + return s.mat.Data[i*s.mat.Stride+j] +} + +// SetSym sets the elements at (i,j) and (j,i) to the value v. +func (s *SymDense) SetSym(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + s.set(i, j, v) +} + +func (s *SymDense) set(i, j int, v float64) { + if i > j { + i, j = j, i + } + s.mat.Data[i*s.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (t *TriDense) At(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + return t.at(i, j) +} + +func (t *TriDense) at(i, j int) float64 { + isUpper := t.triKind() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetTri sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate half of the matrix. +func (t *TriDense) SetTri(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + t.set(i, j, v) +} + +func (t *TriDense) set(i, j int, v float64) { + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. 
+func (b *BandDense) At(i, j int) float64 { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + return b.at(i, j) +} + +func (b *BandDense) at(i, j int) float64 { + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + return 0 + } + return b.mat.Data[i*b.mat.Stride+pj] +} + +// SetBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (b *BandDense) SetBand(i, j int, v float64) { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + panic(ErrBandSet) + } + b.set(i, j, v) +} + +func (b *BandDense) set(i, j int, v float64) { + pj := j + b.mat.KL - i + b.mat.Data[i*b.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (s *SymBandDense) At(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + return s.at(i, j) +} + +func (s *SymBandDense) at(i, j int) float64 { + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + return 0 + } + return s.mat.Data[i*s.mat.Stride+pj] +} + +// SetSymBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (s *SymBandDense) SetSymBand(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + s.set(i, j, v) +} + +func (s *SymBandDense) set(i, j int, v float64) { + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + panic(ErrBandSet) + } + s.mat.Data[i*s.mat.Stride+pj] = v +} + +func (t *TriBandDense) At(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + return t.at(i, j) +} + +func (t *TriBandDense) at(i, j int) float64 { + // TODO(btracey): Support Diag field, see #692. + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + kl := t.mat.K + ku := 0 + if isUpper { + ku = t.mat.K + kl = 0 + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + return 0 + } + return t.mat.Data[i*t.mat.Stride+pj] +} + +func (t *TriBandDense) SetTriBand(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + kl, ku := t.mat.K, 0 + if isUpper { + kl, ku = 0, t.mat.K + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + panic(ErrBandSet) + } + // TODO(btracey): Support Diag field, see #692. + t.mat.Data[i*t.mat.Stride+pj] = v +} + +func (t *TriBandDense) setTriBand(i, j int, v float64) { + var kl int + if !t.isUpper() { + kl = t.mat.K + } + pj := j + kl - i + t.mat.Data[i*t.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (d *DiagDense) At(i, j int) float64 { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(d.mat.N) { + panic(ErrColAccess) + } + return d.at(i, j) +} + +func (d *DiagDense) at(i, j int) float64 { + if i != j { + return 0 + } + return d.mat.Data[i*d.mat.Inc] +} + +// SetDiag sets the element at row i, column i to the value v. +// It panics if the location is outside the appropriate region of the matrix. 
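For the banded and diagonal types above, out-of-band reads through At return zero while the corresponding Set methods panic. A brief editor-added sketch using the diagonal type; the constructor name NewDiagDense is assumed from the package's usual constructor pattern and is not shown in this file.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// A 3×3 diagonal matrix with diagonal (1, 2, 3).
	d := mat.NewDiagDense(3, []float64{1, 2, 3})

	fmt.Println(d.At(1, 1)) // 2
	fmt.Println(d.At(0, 2)) // 0: off-diagonal elements are implicitly zero

	d.SetDiag(2, 9) // only diagonal entries can be written
	fmt.Println(d.At(2, 2)) // 9
}
```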
+func (d *DiagDense) SetDiag(i int, v float64) { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + d.setDiag(i, v) +} + +func (d *DiagDense) setDiag(i int, v float64) { + d.mat.Data[i*d.mat.Inc] = v +} diff --git a/vendor/gonum.org/v1/gonum/mat/inner.go b/vendor/gonum.org/v1/gonum/mat/inner.go new file mode 100644 index 00000000..fba3e0b0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/inner.go @@ -0,0 +1,121 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Inner computes the generalized inner product +// x^T A y +// between column vectors x and y with matrix A. This is only a true inner product if +// A is symmetric positive definite, though the operation works for any matrix A. +// +// Inner panics if x.Len != m or y.Len != n when A is an m x n matrix. +func Inner(x Vector, a Matrix, y Vector) float64 { + m, n := a.Dims() + if x.Len() != m { + panic(ErrShape) + } + if y.Len() != n { + panic(ErrShape) + } + if m == 0 || n == 0 { + return 0 + } + + var sum float64 + + switch a := a.(type) { + case RawSymmetricer: + amat := a.RawSymmetric() + if amat.Uplo != blas.Upper { + // Panic as a string not a mat.Error. + panic(badSymTriangle) + } + var xmat, ymat blas64.Vector + if xrv, ok := x.(RawVectorer); ok { + xmat = xrv.RawVector() + } else { + break + } + if yrv, ok := y.(RawVectorer); ok { + ymat = yrv.RawVector() + } else { + break + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + if xi != 0 { + if ymat.Inc == 1 { + sum += xi * f64.DotUnitary( + amat.Data[i*amat.Stride+i:i*amat.Stride+n], + ymat.Data[i:], + ) + } else { + sum += xi * f64.DotInc( + amat.Data[i*amat.Stride+i:i*amat.Stride+n], + ymat.Data[i*ymat.Inc:], uintptr(n-i), + 1, uintptr(ymat.Inc), + 0, 0, + ) + } + } + yi := y.AtVec(i) + if i != n-1 && yi != 0 { + if xmat.Inc == 1 { + sum += yi * f64.DotUnitary( + amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], + xmat.Data[i+1:], + ) + } else { + sum += yi * f64.DotInc( + amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], + xmat.Data[(i+1)*xmat.Inc:], uintptr(n-i-1), + 1, uintptr(xmat.Inc), + 0, 0, + ) + } + } + } + return sum + case RawMatrixer: + amat := a.RawMatrix() + var ymat blas64.Vector + if yrv, ok := y.(RawVectorer); ok { + ymat = yrv.RawVector() + } else { + break + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + if xi != 0 { + if ymat.Inc == 1 { + sum += xi * f64.DotUnitary( + amat.Data[i*amat.Stride:i*amat.Stride+n], + ymat.Data, + ) + } else { + sum += xi * f64.DotInc( + amat.Data[i*amat.Stride:i*amat.Stride+n], + ymat.Data, uintptr(n), + 1, uintptr(ymat.Inc), + 0, 0, + ) + } + } + } + return sum + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + for j := 0; j < y.Len(); j++ { + sum += xi * a.At(i, j) * y.AtVec(j) + } + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/mat/io.go b/vendor/gonum.org/v1/gonum/mat/io.go new file mode 100644 index 00000000..7f7ef070 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/io.go @@ -0,0 +1,492 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +// version is the current on-disk codec version. 
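The Inner function above evaluates the bilinear form x^T A y without materialising A*y. An editor-added sketch with hand-checkable numbers (A*y = (17, 39)^T, so x^T A y = 1*17 + 2*39 = 95); NewVecDense is a constructor from elsewhere in the package.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	x := mat.NewVecDense(2, []float64{1, 2})
	y := mat.NewVecDense(2, []float64{5, 6})
	a := mat.NewDense(2, 2, []float64{
		1, 2,
		3, 4,
	})

	// x^T A y = 1*(1*5+2*6) + 2*(3*5+4*6) = 17 + 78 = 95
	fmt.Println(mat.Inner(x, a, y)) // 95
}
```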
+const version uint32 = 0x1 + +// maxLen is the biggest slice/array len one can create on a 32/64b platform. +const maxLen = int64(int(^uint(0) >> 1)) + +var ( + headerSize = binary.Size(storage{}) + sizeInt64 = binary.Size(int64(0)) + sizeFloat64 = binary.Size(float64(0)) + + errWrongType = errors.New("mat: wrong data type") + + errTooBig = errors.New("mat: resulting data slice too big") + errTooSmall = errors.New("mat: input slice too small") + errBadBuffer = errors.New("mat: data buffer size mismatch") + errBadSize = errors.New("mat: invalid dimension") +) + +// Type encoding scheme: +// +// Type Form Packing Uplo Unit Rows Columns kU kL +// uint8 [GST] uint8 [BPF] uint8 [AUL] bool int64 int64 int64 int64 +// General 'G' 'F' 'A' false r c 0 0 +// Band 'G' 'B' 'A' false r c kU kL +// Symmetric 'S' 'F' ul false n n 0 0 +// SymmetricBand 'S' 'B' ul false n n k k +// SymmetricPacked 'S' 'P' ul false n n 0 0 +// Triangular 'T' 'F' ul Diag==Unit n n 0 0 +// TriangularBand 'T' 'B' ul Diag==Unit n n k k +// TriangularPacked 'T' 'P' ul Diag==Unit n n 0 0 +// +// G - general, S - symmetric, T - triangular +// F - full, B - band, P - packed +// A - all, U - upper, L - lower + +// MarshalBinary encodes the receiver into a binary form and returns the result. +// +// Dense is little-endian encoded as follows: +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of rows (int64) +// 16 - 23 number of columns (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. matrix data elements (float64) +// [0,0] [0,1] ... [0,ncols-1] +// [1,0] [1,1] ... [1,ncols-1] +// ... +// [nrows-1,0] ... [nrows-1,ncols-1] +func (m Dense) MarshalBinary() ([]byte, error) { + bufLen := int64(headerSize) + int64(m.mat.Rows)*int64(m.mat.Cols)*int64(sizeFloat64) + if bufLen <= 0 { + // bufLen is too big and has wrapped around. + return nil, errTooBig + } + + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), + Version: version, + } + buf := make([]byte, bufLen) + n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0])) + if err != nil { + return buf[:n], err + } + + p := headerSize + r, c := m.Dims() + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(m.at(i, j))) + p += sizeFloat64 + } + } + + return buf, nil +} + +// MarshalBinaryTo encodes the receiver into a binary form and writes it into w. +// MarshalBinaryTo returns the number of bytes written into w and an error, if any. +// +// See MarshalBinary for the on-disk layout. +func (m Dense) MarshalBinaryTo(w io.Writer) (int, error) { + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), + Version: version, + } + n, err := header.marshalBinaryTo(w) + if err != nil { + return n, err + } + + r, c := m.Dims() + var b [8]byte + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + binary.LittleEndian.PutUint64(b[:], math.Float64bits(m.at(i, j))) + nn, err := w.Write(b[:]) + n += nn + if err != nil { + return n, err + } + } + } + + return n, nil +} + +// UnmarshalBinary decodes the binary form into the receiver. +// It panics if the receiver is a non-zero Dense matrix. +// +// See MarshalBinary for the on-disk layout. 
+// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. a 16GB matrix written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so +// it should not be used on untrusted data. +func (m *Dense) UnmarshalBinary(data []byte) error { + if !m.IsZero() { + panic("mat: unmarshal into non-zero matrix") + } + + if len(data) < headerSize { + return errTooSmall + } + + var header storage + err := header.unmarshalBinary(data[:headerSize]) + if err != nil { + return err + } + rows := header.Rows + cols := header.Cols + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return errWrongType + } + if rows < 0 || cols < 0 { + return errBadSize + } + size := rows * cols + if size == 0 { + return ErrZeroLength + } + if int(size) < 0 || size > maxLen { + return errTooBig + } + if len(data) != headerSize+int(rows*cols)*sizeFloat64 { + return errBadBuffer + } + + p := headerSize + m.reuseAs(int(rows), int(cols)) + for i := range m.mat.Data { + m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64])) + p += sizeFloat64 + } + + return nil +} + +// UnmarshalBinaryFrom decodes the binary form into the receiver and returns +// the number of bytes read and an error if any. +// It panics if the receiver is a non-zero Dense matrix. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. a 16GB matrix written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so +// it should not be used on untrusted data. +func (m *Dense) UnmarshalBinaryFrom(r io.Reader) (int, error) { + if !m.IsZero() { + panic("mat: unmarshal into non-zero matrix") + } + + var header storage + n, err := header.unmarshalBinaryFrom(r) + if err != nil { + return n, err + } + rows := header.Rows + cols := header.Cols + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return n, errWrongType + } + if rows < 0 || cols < 0 { + return n, errBadSize + } + size := rows * cols + if size == 0 { + return n, ErrZeroLength + } + if int(size) < 0 || size > maxLen { + return n, errTooBig + } + + m.reuseAs(int(rows), int(cols)) + var b [8]byte + for i := range m.mat.Data { + nn, err := readFull(r, b[:]) + n += nn + if err != nil { + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err + } + m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) + } + + return n, nil +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +// +// VecDense is little-endian encoded as follows: +// +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of elements (int64) +// 16 - 23 1 (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. 
vector's data elements (float64) +func (v VecDense) MarshalBinary() ([]byte, error) { + bufLen := int64(headerSize) + int64(v.mat.N)*int64(sizeFloat64) + if bufLen <= 0 { + // bufLen is too big and has wrapped around. + return nil, errTooBig + } + + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(v.mat.N), Cols: 1, + Version: version, + } + buf := make([]byte, bufLen) + n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0])) + if err != nil { + return buf[:n], err + } + + p := headerSize + for i := 0; i < v.mat.N; i++ { + binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(v.at(i))) + p += sizeFloat64 + } + + return buf, nil +} + +// MarshalBinaryTo encodes the receiver into a binary form, writes it to w and +// returns the number of bytes written and an error if any. +// +// See MarshalBainry for the on-disk format. +func (v VecDense) MarshalBinaryTo(w io.Writer) (int, error) { + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(v.mat.N), Cols: 1, + Version: version, + } + n, err := header.marshalBinaryTo(w) + if err != nil { + return n, err + } + + var buf [8]byte + for i := 0; i < v.mat.N; i++ { + binary.LittleEndian.PutUint64(buf[:], math.Float64bits(v.at(i))) + nn, err := w.Write(buf[:]) + n += nn + if err != nil { + return n, err + } + } + + return n, nil +} + +// UnmarshalBinary decodes the binary form into the receiver. +// It panics if the receiver is a non-zero VecDense. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows is negative, +// - an error is returned if the resulting VecDense is too +// big for the current architecture (e.g. a 16GB vector written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled vector, and so +// it should not be used on untrusted data. +func (v *VecDense) UnmarshalBinary(data []byte) error { + if !v.IsZero() { + panic("mat: unmarshal into non-zero vector") + } + + if len(data) < headerSize { + return errTooSmall + } + + var header storage + err := header.unmarshalBinary(data[:headerSize]) + if err != nil { + return err + } + if header.Cols != 1 { + return ErrShape + } + n := header.Rows + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return errWrongType + } + if n == 0 { + return ErrZeroLength + } + if n < 0 { + return errBadSize + } + if int64(maxLen) < n { + return errTooBig + } + if len(data) != headerSize+int(n)*sizeFloat64 { + return errBadBuffer + } + + p := headerSize + v.reuseAs(int(n)) + for i := range v.mat.Data { + v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64])) + p += sizeFloat64 + } + + return nil +} + +// UnmarshalBinaryFrom decodes the binary form into the receiver, from the +// io.Reader and returns the number of bytes read and an error if any. +// It panics if the receiver is a non-zero VecDense. +// +// See MarshalBinary for the on-disk layout. +// See UnmarshalBinary for the list of sanity checks performed on the input. 
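An editor-added sketch of the round trip that the MarshalBinary and UnmarshalBinary documentation above describes, using the Dense codec (the VecDense path is analogous). Decoding requires a zero-valued receiver, so a fresh variable is used; mat.Equal comes from elsewhere in the package.

```go
package main

import (
	"fmt"
	"log"

	"gonum.org/v1/gonum/mat"
)

func main() {
	src := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})

	raw, err := src.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}

	var dst mat.Dense // must still be a zero matrix when UnmarshalBinary is called
	if err := dst.UnmarshalBinary(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(mat.Equal(src, &dst)) // true
}
```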
+func (v *VecDense) UnmarshalBinaryFrom(r io.Reader) (int, error) { + if !v.IsZero() { + panic("mat: unmarshal into non-zero vector") + } + + var header storage + n, err := header.unmarshalBinaryFrom(r) + if err != nil { + return n, err + } + if header.Cols != 1 { + return n, ErrShape + } + l := header.Rows + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return n, errWrongType + } + if l == 0 { + return n, ErrZeroLength + } + if l < 0 { + return n, errBadSize + } + if int64(maxLen) < l { + return n, errTooBig + } + + v.reuseAs(int(l)) + var b [8]byte + for i := range v.mat.Data { + nn, err := readFull(r, b[:]) + n += nn + if err != nil { + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err + } + v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) + } + + return n, nil +} + +// storage is the internal representation of the storage format of a +// serialised matrix. +type storage struct { + Version uint32 // Keep this first. + Form byte // [GST] + Packing byte // [BPF] + Uplo byte // [AUL] + Unit bool + Rows int64 + Cols int64 + KU int64 + KL int64 +} + +// TODO(kortschak): Consider replacing these with calls to direct +// encoding/decoding of fields rather than to binary.Write/binary.Read. + +func (s storage) marshalBinaryTo(w io.Writer) (int, error) { + buf := bytes.NewBuffer(make([]byte, 0, headerSize)) + err := binary.Write(buf, binary.LittleEndian, s) + if err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +func (s *storage) unmarshalBinary(buf []byte) error { + err := binary.Read(bytes.NewReader(buf), binary.LittleEndian, s) + if err != nil { + return err + } + if s.Version != version { + return fmt.Errorf("mat: incorrect version: %d", s.Version) + } + return nil +} + +func (s *storage) unmarshalBinaryFrom(r io.Reader) (int, error) { + buf := make([]byte, headerSize) + n, err := readFull(r, buf) + if err != nil { + return n, err + } + return n, s.unmarshalBinary(buf[:n]) +} + +// readFull reads from r into buf until it has read len(buf). +// It returns the number of bytes copied and an error if fewer bytes were read. +// If an EOF happens after reading fewer than len(buf) bytes, io.ErrUnexpectedEOF is returned. +func readFull(r io.Reader, buf []byte) (int, error) { + var n int + var err error + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n == len(buf) { + return n, nil + } + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err +} diff --git a/vendor/gonum.org/v1/gonum/mat/lq.go b/vendor/gonum.org/v1/gonum/mat/lq.go new file mode 100644 index 00000000..d788457d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/lq.go @@ -0,0 +1,262 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const badLQ = "mat: invalid LQ factorization" + +// LQ is a type for creating and using the LQ factorization of a matrix. 
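An editor-added sketch of the LQ type introduced above: factorize a matrix with at least as many columns as rows (the shape the Factorize implementation accepts), extract L and Q, check that their product reproduces A, and compute a minimum-norm solution. EqualApprox and Formatted are helpers from other files of the package.

```go
package main

import (
	"fmt"
	"log"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// A 2×3 matrix: LQ needs at least as many columns as rows.
	a := mat.NewDense(2, 3, []float64{
		4, 2, 0,
		1, 3, 5,
	})

	var lq mat.LQ
	lq.Factorize(a)

	l := lq.LTo(nil) // 2×3 lower trapezoidal factor
	q := lq.QTo(nil) // 3×3 orthonormal factor

	var recon mat.Dense
	recon.Mul(l, q)
	fmt.Println(mat.EqualApprox(a, &recon, 1e-12)) // true: A = L*Q

	// Minimum-norm solution of the underdetermined system A*x = b.
	b := mat.NewDense(2, 1, []float64{6, 9})
	var x mat.Dense
	if err := lq.SolveTo(&x, false, b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("x^T = %v\n", mat.Formatted(x.T()))
}
```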
+type LQ struct { + lq *Dense + tau []float64 + cond float64 +} + +func (lq *LQ) updateCond(norm lapack.MatrixNorm) { + // Since A = L*Q, and Q is orthogonal, we get for the condition number κ + // κ(A) := |A| |A^-1| = |L*Q| |(L*Q)^-1| = |L| |Q^T * L^-1| + // = |L| |L^-1| = κ(L), + // where we used that fact that Q^-1 = Q^T. However, this assumes that + // the matrix norm is invariant under orthogonal transformations which + // is not the case for CondNorm. Hopefully the error is negligible: κ + // is only a qualitative measure anyway. + m := lq.lq.mat.Rows + work := getFloats(3*m, false) + iwork := getInts(m, false) + l := lq.lq.asTriDense(m, blas.NonUnit, blas.Lower) + v := lapack64.Trcon(norm, l.mat, work, iwork) + lq.cond = 1 / v + putFloats(work) + putInts(iwork) +} + +// Factorize computes the LQ factorization of an m×n matrix a where n <= m. The LQ +// factorization always exists even if A is singular. +// +// The LQ decomposition is a factorization of the matrix A such that A = L * Q. +// The matrix Q is an orthonormal n×n matrix, and L is an m×n upper triangular matrix. +// L and Q can be extracted from the LTo and QTo methods. +func (lq *LQ) Factorize(a Matrix) { + lq.factorize(a, CondNorm) +} + +func (lq *LQ) factorize(a Matrix, norm lapack.MatrixNorm) { + m, n := a.Dims() + if m > n { + panic(ErrShape) + } + k := min(m, n) + if lq.lq == nil { + lq.lq = &Dense{} + } + lq.lq.Clone(a) + work := []float64{0} + lq.tau = make([]float64, k) + lapack64.Gelqf(lq.lq.mat, lq.tau, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Gelqf(lq.lq.mat, lq.tau, work, len(work)) + putFloats(work) + lq.updateCond(norm) +} + +// isValid returns whether the receiver contains a factorization. +func (lq *LQ) isValid() bool { + return lq.lq != nil && !lq.lq.IsZero() +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a factorization. +func (lq *LQ) Cond() float64 { + if !lq.isValid() { + panic(badLQ) + } + return lq.cond +} + +// TODO(btracey): Add in the "Reduced" forms for extracting the m×m orthogonal +// and upper triangular matrices. + +// LTo extracts the m×n lower trapezoidal matrix from a LQ decomposition. +// If dst is nil, a new matrix is allocated. The resulting L matrix is returned. +// LTo will panic if the receiver does not contain a factorization. +func (lq *LQ) LTo(dst *Dense) *Dense { + if !lq.isValid() { + panic(badLQ) + } + + r, c := lq.lq.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + // Disguise the LQ as a lower triangular. + t := &TriDense{ + mat: blas64.Triangular{ + N: r, + Stride: lq.lq.mat.Stride, + Data: lq.lq.mat.Data, + Uplo: blas.Lower, + Diag: blas.NonUnit, + }, + cap: lq.lq.capCols, + } + dst.Copy(t) + + if r == c { + return dst + } + // Zero right of the triangular. + for i := 0; i < r; i++ { + zero(dst.mat.Data[i*dst.mat.Stride+r : i*dst.mat.Stride+c]) + } + + return dst +} + +// QTo extracts the n×n orthonormal matrix Q from an LQ decomposition. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +// QTo will panic if the receiver does not contain a factorization. +func (lq *LQ) QTo(dst *Dense) *Dense { + if !lq.isValid() { + panic(badLQ) + } + + _, c := lq.lq.Dims() + if dst == nil { + dst = NewDense(c, c, nil) + } else { + dst.reuseAsZeroed(c, c) + } + q := dst.mat + + // Set Q = I. + ldq := q.Stride + for i := 0; i < c; i++ { + q.Data[i*ldq+i] = 1 + } + + // Construct Q from the elementary reflectors. 
+ work := []float64{0} + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, len(work)) + putFloats(work) + + return dst +} + +// SolveTo finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and b, where A is an m×n matrix represented in its LQ factorized +// form. If A is singular or near-singular a Condition error is returned. +// See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters. +// If trans == false, find the minimum norm solution of A * X = B. +// If trans == true, find X such that ||A*X - B||_2 is minimized. +// The solution matrix, X, is stored in place into dst. +// SolveTo will panic if the receiver does not contain a factorization. +func (lq *LQ) SolveTo(dst *Dense, trans bool, b Matrix) error { + if !lq.isValid() { + panic(badLQ) + } + + r, c := lq.lq.Dims() + br, bc := b.Dims() + + // The LQ solve algorithm stores the result in-place into the right hand side. + // The storage for the answer must be large enough to hold both b and x. + // However, this method's receiver must be the size of x. Copy b, and then + // copy the result into x at the end. + if trans { + if c != br { + panic(ErrShape) + } + dst.reuseAs(r, bc) + } else { + if r != br { + panic(ErrShape) + } + dst.reuseAs(c, bc) + } + // Do not need to worry about overlap between x and b because w has its own + // independent storage. + w := getWorkspace(max(r, c), bc, false) + w.Copy(b) + t := lq.lq.asTriDense(lq.lq.mat.Rows, blas.NonUnit, blas.Lower).mat + if trans { + work := []float64{0} + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, len(work)) + putFloats(work) + + ok := lapack64.Trtrs(blas.Trans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + } else { + ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + for i := r; i < c; i++ { + zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) + } + work := []float64{0} + lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, len(work)) + putFloats(work) + } + // x was set above to be the correct size for the result. + dst.Copy(w) + putWorkspace(w) + if lq.cond > ConditionTolerance { + return Condition(lq.cond) + } + return nil +} + +// SolveVecTo finds a minimum-norm solution to a system of linear equations. +// See LQ.SolveTo for the full documentation. +// SolveToVec will panic if the receiver does not contain a factorization. +func (lq *LQ) SolveVecTo(dst *VecDense, trans bool, b Vector) error { + if !lq.isValid() { + panic(badLQ) + } + + r, c := lq.lq.Dims() + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. 
+ bm := Matrix(b) + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if dst != b { + dst.checkOverlap(bmat) + } + b := VecDense{mat: bmat} + bm = b.asDense() + } + if trans { + dst.reuseAs(r) + } else { + dst.reuseAs(c) + } + return lq.SolveTo(dst.asDense(), trans, bm) +} diff --git a/vendor/gonum.org/v1/gonum/mat/lu.go b/vendor/gonum.org/v1/gonum/mat/lu.go new file mode 100644 index 00000000..e0437169 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/lu.go @@ -0,0 +1,422 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badSliceLength = "mat: improper slice length" + badLU = "mat: invalid LU factorization" +) + +// LU is a type for creating and using the LU factorization of a matrix. +type LU struct { + lu *Dense + pivot []int + cond float64 +} + +// updateCond updates the stored condition number of the matrix. anorm is the +// norm of the original matrix. If anorm is negative it will be estimated. +func (lu *LU) updateCond(anorm float64, norm lapack.MatrixNorm) { + n := lu.lu.mat.Cols + work := getFloats(4*n, false) + defer putFloats(work) + iwork := getInts(n, false) + defer putInts(iwork) + if anorm < 0 { + // This is an approximation. By the definition of a norm, + // |AB| <= |A| |B|. + // Since A = L*U, we get for the condition number κ that + // κ(A) := |A| |A^-1| = |L*U| |A^-1| <= |L| |U| |A^-1|, + // so this will overestimate the condition number somewhat. + // The norm of the original factorized matrix cannot be stored + // because of update possibilities. + u := lu.lu.asTriDense(n, blas.NonUnit, blas.Upper) + l := lu.lu.asTriDense(n, blas.Unit, blas.Lower) + unorm := lapack64.Lantr(norm, u.mat, work) + lnorm := lapack64.Lantr(norm, l.mat, work) + anorm = unorm * lnorm + } + v := lapack64.Gecon(norm, lu.lu.mat, anorm, work, iwork) + lu.cond = 1 / v +} + +// Factorize computes the LU factorization of the square matrix a and stores the +// result. The LU decomposition will complete regardless of the singularity of a. +// +// The LU factorization is computed with pivoting, and so really the decomposition +// is a PLU decomposition where P is a permutation matrix. The individual matrix +// factors can be extracted from the factorization using the Permutation method +// on Dense, and the LU LTo and UTo methods. +func (lu *LU) Factorize(a Matrix) { + lu.factorize(a, CondNorm) +} + +func (lu *LU) factorize(a Matrix, norm lapack.MatrixNorm) { + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + if lu.lu == nil { + lu.lu = NewDense(r, r, nil) + } else { + lu.lu.Reset() + lu.lu.reuseAs(r, r) + } + lu.lu.Copy(a) + if cap(lu.pivot) < r { + lu.pivot = make([]int, r) + } + lu.pivot = lu.pivot[:r] + work := getFloats(r, false) + anorm := lapack64.Lange(norm, lu.lu.mat, work) + putFloats(work) + lapack64.Getrf(lu.lu.mat, lu.pivot) + lu.updateCond(anorm, norm) +} + +// isValid returns whether the receiver contains a factorization. +func (lu *LU) isValid() bool { + return lu.lu != nil && !lu.lu.IsZero() +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a factorization. 
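An editor-added sketch of the LU workflow documented above: factorize a square system, read off the determinant, and solve a right-hand side instead of forming an inverse. The system was chosen so the expected results (det = 5, x = 2, y = 3) can be verified by hand; NewVecDense comes from elsewhere in the package.

```go
package main

import (
	"fmt"
	"log"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// 3x +  y = 9
	//  x + 2y = 8   has the solution x = 2, y = 3.
	a := mat.NewDense(2, 2, []float64{
		3, 1,
		1, 2,
	})
	b := mat.NewVecDense(2, []float64{9, 8})

	var lu mat.LU
	lu.Factorize(a)
	fmt.Println(lu.Det()) // 5 (up to floating-point rounding)

	var x mat.VecDense
	if err := lu.SolveVecTo(&x, false, b); err != nil {
		log.Fatal(err) // a Condition error flags a (near-)singular matrix
	}
	fmt.Println(x.AtVec(0), x.AtVec(1)) // 2 3
}
```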
+func (lu *LU) Cond() float64 { + if !lu.isValid() { + panic(badLU) + } + return lu.cond +} + +// Reset resets the factorization so that it can be reused as the receiver of a +// dimensionally restricted operation. +func (lu *LU) Reset() { + if lu.lu != nil { + lu.lu.Reset() + } + lu.pivot = lu.pivot[:0] +} + +func (lu *LU) isZero() bool { + return len(lu.pivot) == 0 +} + +// Det returns the determinant of the matrix that has been factorized. In many +// expressions, using LogDet will be more numerically stable. +// Det will panic if the receiver does not contain a factorization. +func (lu *LU) Det() float64 { + det, sign := lu.LogDet() + return math.Exp(det) * sign +} + +// LogDet returns the log of the determinant and the sign of the determinant +// for the matrix that has been factorized. Numerical stability in product and +// division expressions is generally improved by working in log space. +// LogDet will panic if the receiver does not contain a factorization. +func (lu *LU) LogDet() (det float64, sign float64) { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + logDiag := getFloats(n, false) + defer putFloats(logDiag) + sign = 1.0 + for i := 0; i < n; i++ { + v := lu.lu.at(i, i) + if v < 0 { + sign *= -1 + } + if lu.pivot[i] != i { + sign *= -1 + } + logDiag[i] = math.Log(math.Abs(v)) + } + return floats.Sum(logDiag), sign +} + +// Pivot returns pivot indices that enable the construction of the permutation +// matrix P (see Dense.Permutation). If swaps == nil, then new memory will be +// allocated, otherwise the length of the input must be equal to the size of the +// factorized matrix. +// Pivot will panic if the receiver does not contain a factorization. +func (lu *LU) Pivot(swaps []int) []int { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if swaps == nil { + swaps = make([]int, n) + } + if len(swaps) != n { + panic(badSliceLength) + } + // Perform the inverse of the row swaps in order to find the final + // row swap position. + for i := range swaps { + swaps[i] = i + } + for i := n - 1; i >= 0; i-- { + v := lu.pivot[i] + swaps[i], swaps[v] = swaps[v], swaps[i] + } + return swaps +} + +// RankOne updates an LU factorization as if a rank-one update had been applied to +// the original matrix A, storing the result into the receiver. That is, if in +// the original LU decomposition P * L * U = A, in the updated decomposition +// P * L * U = A + alpha * x * y^T. +// RankOne will panic if orig does not contain a factorization. +func (lu *LU) RankOne(orig *LU, alpha float64, x, y Vector) { + if !orig.isValid() { + panic(badLU) + } + + // RankOne uses algorithm a1 on page 28 of "Multiple-Rank Updates to Matrix + // Factorizations for Nonlinear Analysis and Circuit Design" by Linzhong Deng. 
+ // http://web.stanford.edu/group/SOL/dissertations/Linzhong-Deng-thesis.pdf + _, n := orig.lu.Dims() + if r, c := x.Dims(); r != n || c != 1 { + panic(ErrShape) + } + if r, c := y.Dims(); r != n || c != 1 { + panic(ErrShape) + } + if orig != lu { + if lu.isZero() { + if cap(lu.pivot) < n { + lu.pivot = make([]int, n) + } + lu.pivot = lu.pivot[:n] + if lu.lu == nil { + lu.lu = NewDense(n, n, nil) + } else { + lu.lu.reuseAs(n, n) + } + } else if len(lu.pivot) != n { + panic(ErrShape) + } + copy(lu.pivot, orig.pivot) + lu.lu.Copy(orig.lu) + } + + xs := getFloats(n, false) + defer putFloats(xs) + ys := getFloats(n, false) + defer putFloats(ys) + for i := 0; i < n; i++ { + xs[i] = x.AtVec(i) + ys[i] = y.AtVec(i) + } + + // Adjust for the pivoting in the LU factorization + for i, v := range lu.pivot { + xs[i], xs[v] = xs[v], xs[i] + } + + lum := lu.lu.mat + omega := alpha + for j := 0; j < n; j++ { + ujj := lum.Data[j*lum.Stride+j] + ys[j] /= ujj + theta := 1 + xs[j]*ys[j]*omega + beta := omega * ys[j] / theta + gamma := omega * xs[j] + omega -= beta * gamma + lum.Data[j*lum.Stride+j] *= theta + for i := j + 1; i < n; i++ { + xs[i] -= lum.Data[i*lum.Stride+j] * xs[j] + tmp := ys[i] + ys[i] -= lum.Data[j*lum.Stride+i] * ys[j] + lum.Data[i*lum.Stride+j] += beta * xs[i] + lum.Data[j*lum.Stride+i] += gamma * tmp + } + } + lu.updateCond(-1, CondNorm) +} + +// LTo extracts the lower triangular matrix from an LU factorization. +// If dst is nil, a new matrix is allocated. The resulting L matrix is returned. +// LTo will panic if the receiver does not contain a factorization. +func (lu *LU) LTo(dst *TriDense) *TriDense { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if dst == nil { + dst = NewTriDense(n, Lower, nil) + } else { + dst.reuseAs(n, Lower) + } + // Extract the lower triangular elements. + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] + } + } + // Set ones on the diagonal. + for i := 0; i < n; i++ { + dst.mat.Data[i*dst.mat.Stride+i] = 1 + } + return dst +} + +// UTo extracts the upper triangular matrix from an LU factorization. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +// UTo will panic if the receiver does not contain a factorization. +func (lu *LU) UTo(dst *TriDense) *TriDense { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if dst == nil { + dst = NewTriDense(n, Upper, nil) + } else { + dst.reuseAs(n, Upper) + } + // Extract the upper triangular elements. + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] + } + } + return dst +} + +// Permutation constructs an r×r permutation matrix with the given row swaps. +// A permutation matrix has exactly one element equal to one in each row and column +// and all other elements equal to zero. swaps[i] specifies the row with which +// i will be swapped, which is equivalent to the non-zero column of row i. +func (m *Dense) Permutation(r int, swaps []int) { + m.reuseAs(r, r) + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+r]) + v := swaps[i] + if v < 0 || v >= r { + panic(ErrRowAccess) + } + m.mat.Data[i*m.mat.Stride+v] = 1 + } +} + +// SolveTo solves a system of linear equations using the LU decomposition of a matrix. 
+// It computes +// A * X = B if trans == false +// A^T * X = B if trans == true +// In both cases, A is represented in LU factorized form, and the matrix X is +// stored into dst. +// +// If A is singular or near-singular a Condition error is returned. See +// the documentation for Condition for more information. +// SolveTo will panic if the receiver does not contain a factorization. +func (lu *LU) SolveTo(dst *Dense, trans bool, b Matrix) error { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + br, bc := b.Dims() + if br != n { + panic(ErrShape) + } + // TODO(btracey): Should test the condition number instead of testing that + // the determinant is exactly zero. + if lu.Det() == 0 { + return Condition(math.Inf(1)) + } + + dst.reuseAs(n, bc) + bU, _ := untranspose(b) + var restore func() + if dst == bU { + dst, restore = dst.isolatedWorkspace(bU) + defer restore() + } else if rm, ok := bU.(RawMatrixer); ok { + dst.checkOverlap(rm.RawMatrix()) + } + + dst.Copy(b) + t := blas.NoTrans + if trans { + t = blas.Trans + } + lapack64.Getrs(t, lu.lu.mat, dst.mat, lu.pivot) + if lu.cond > ConditionTolerance { + return Condition(lu.cond) + } + return nil +} + +// SolveVecTo solves a system of linear equations using the LU decomposition of a matrix. +// It computes +// A * x = b if trans == false +// A^T * x = b if trans == true +// In both cases, A is represented in LU factorized form, and the vector x is +// stored into dst. +// +// If A is singular or near-singular a Condition error is returned. See +// the documentation for Condition for more information. +// SolveVecTo will panic if the receiver does not contain a factorization. +func (lu *LU) SolveVecTo(dst *VecDense, trans bool, b Vector) error { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if br, bc := b.Dims(); br != n || bc != 1 { + panic(ErrShape) + } + switch rv := b.(type) { + default: + dst.reuseAs(n) + return lu.SolveTo(dst.asDense(), trans, b) + case RawVectorer: + if dst != b { + dst.checkOverlap(rv.RawVector()) + } + // TODO(btracey): Should test the condition number instead of testing that + // the determinant is exactly zero. + if lu.Det() == 0 { + return Condition(math.Inf(1)) + } + + dst.reuseAs(n) + var restore func() + if dst == b { + dst, restore = dst.isolatedWorkspace(b) + defer restore() + } + dst.CopyVec(b) + vMat := blas64.General{ + Rows: n, + Cols: 1, + Stride: dst.mat.Inc, + Data: dst.mat.Data, + } + t := blas.NoTrans + if trans { + t = blas.Trans + } + lapack64.Getrs(t, lu.lu.mat, vMat, lu.pivot) + if lu.cond > ConditionTolerance { + return Condition(lu.cond) + } + return nil + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/matrix.go b/vendor/gonum.org/v1/gonum/mat/matrix.go new file mode 100644 index 00000000..444d0445 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/matrix.go @@ -0,0 +1,946 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Matrix is the basic matrix interface type. +type Matrix interface { + // Dims returns the dimensions of a Matrix. + Dims() (r, c int) + + // At returns the value of a matrix element at row i, column j. + // It will panic if i or j are out of bounds for the matrix. 
+ At(i, j int) float64 + + // T returns the transpose of the Matrix. Whether T returns a copy of the + // underlying data is implementation dependent. + // This method may be implemented using the Transpose type, which + // provides an implicit matrix transpose. + T() Matrix +} + +var ( + _ Matrix = Transpose{} + _ Untransposer = Transpose{} +) + +// Transpose is a type for performing an implicit matrix transpose. It implements +// the Matrix interface, returning values from the transpose of the matrix within. +type Transpose struct { + Matrix Matrix +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Matrix field. +func (t Transpose) At(i, j int) float64 { + return t.Matrix.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. The number of rows returned +// is the number of columns in the Matrix field, and the number of columns is +// the number of rows in the Matrix field. +func (t Transpose) Dims() (r, c int) { + c, r = t.Matrix.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Matrix field. +func (t Transpose) T() Matrix { + return t.Matrix +} + +// Untranspose returns the Matrix field. +func (t Transpose) Untranspose() Matrix { + return t.Matrix +} + +// Untransposer is a type that can undo an implicit transpose. +type Untransposer interface { + // Note: This interface is needed to unify all of the Transpose types. In + // the mat methods, we need to test if the Matrix has been implicitly + // transposed. If this is checked by testing for the specific Transpose type + // then the behavior will be different if the user uses T() or TTri() for a + // triangular matrix. + + // Untranspose returns the underlying Matrix stored for the implicit transpose. + Untranspose() Matrix +} + +// UntransposeBander is a type that can undo an implicit band transpose. +type UntransposeBander interface { + // Untranspose returns the underlying Banded stored for the implicit transpose. + UntransposeBand() Banded +} + +// UntransposeTrier is a type that can undo an implicit triangular transpose. +type UntransposeTrier interface { + // Untranspose returns the underlying Triangular stored for the implicit transpose. + UntransposeTri() Triangular +} + +// UntransposeTriBander is a type that can undo an implicit triangular banded +// transpose. +type UntransposeTriBander interface { + // Untranspose returns the underlying Triangular stored for the implicit transpose. + UntransposeTriBand() TriBanded +} + +// Mutable is a matrix interface type that allows elements to be altered. +type Mutable interface { + // Set alters the matrix element at row i, column j to v. + // It will panic if i or j are out of bounds for the matrix. + Set(i, j int, v float64) + + Matrix +} + +// A RowViewer can return a Vector reflecting a row that is backed by the matrix +// data. The Vector returned will have length equal to the number of columns. +type RowViewer interface { + RowView(i int) Vector +} + +// A RawRowViewer can return a slice of float64 reflecting a row that is backed by the matrix +// data. +type RawRowViewer interface { + RawRowView(i int) []float64 +} + +// A ColViewer can return a Vector reflecting a column that is backed by the matrix +// data. The Vector returned will have length equal to the number of rows. +type ColViewer interface { + ColView(j int) Vector +} + +// A RawColViewer can return a slice of float64 reflecting a column that is backed by the matrix +// data. 
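+//
+// NOTE (editorial sketch, not part of the upstream gonum source): the implicit
+// transpose machinery above never copies data; a caller sees it as follows:
+//
+//	a := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})
+//	at := a.T()       // no copy; at wraps a in a Transpose value
+//	r, c := at.Dims() // r == 3, c == 2
+//	v := at.At(2, 1)  // v == 6, i.e. a.At(1, 2)
+//
+//	if ut, ok := at.(mat.Untransposer); ok {
+//		orig := ut.Untranspose() // recovers the original *mat.Dense
+//		_ = orig
+//	}
+//	_, _, _ = r, c, v
+//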
+type RawColViewer interface { + RawColView(j int) []float64 +} + +// A Cloner can make a copy of a into the receiver, overwriting the previous value of the +// receiver. The clone operation does not make any restriction on shape and will not cause +// shadowing. +type Cloner interface { + Clone(a Matrix) +} + +// A Reseter can reset the matrix so that it can be reused as the receiver of a dimensionally +// restricted operation. This is commonly used when the matrix is being used as a workspace +// or temporary matrix. +// +// If the matrix is a view, using the reset matrix may result in data corruption in elements +// outside the view. +type Reseter interface { + Reset() +} + +// A Copier can make a copy of elements of a into the receiver. The submatrix copied +// starts at row and column 0 and has dimensions equal to the minimum dimensions of +// the two matrices. The number of row and columns copied is returned. +// Copy will copy from a source that aliases the receiver unless the source is transposed; +// an aliasing transpose copy will panic with the exception for a special case when +// the source data has a unitary increment or stride. +type Copier interface { + Copy(a Matrix) (r, c int) +} + +// A Grower can grow the size of the represented matrix by the given number of rows and columns. +// Growing beyond the size given by the Caps method will result in the allocation of a new +// matrix and copying of the elements. If Grow is called with negative increments it will +// panic with ErrIndexOutOfRange. +type Grower interface { + Caps() (r, c int) + Grow(r, c int) Matrix +} + +// A BandWidther represents a banded matrix and can return the left and right half-bandwidths, k1 and +// k2. +type BandWidther interface { + BandWidth() (k1, k2 int) +} + +// A RawMatrixSetter can set the underlying blas64.General used by the receiver. There is no restriction +// on the shape of the receiver. Changes to the receiver's elements will be reflected in the blas64.General.Data. +type RawMatrixSetter interface { + SetRawMatrix(a blas64.General) +} + +// A RawMatrixer can return a blas64.General representation of the receiver. Changes to the blas64.General.Data +// slice will be reflected in the original matrix, changes to the Rows, Cols and Stride fields will not. +type RawMatrixer interface { + RawMatrix() blas64.General +} + +// A RawVectorer can return a blas64.Vector representation of the receiver. Changes to the blas64.Vector.Data +// slice will be reflected in the original matrix, changes to the Inc field will not. +type RawVectorer interface { + RawVector() blas64.Vector +} + +// A NonZeroDoer can call a function for each non-zero element of the receiver. +// The parameters of the function are the element indices and its value. +type NonZeroDoer interface { + DoNonZero(func(i, j int, v float64)) +} + +// A RowNonZeroDoer can call a function for each non-zero element of a row of the receiver. +// The parameters of the function are the element indices and its value. +type RowNonZeroDoer interface { + DoRowNonZero(i int, fn func(i, j int, v float64)) +} + +// A ColNonZeroDoer can call a function for each non-zero element of a column of the receiver. +// The parameters of the function are the element indices and its value. +type ColNonZeroDoer interface { + DoColNonZero(j int, fn func(i, j int, v float64)) +} + +// untranspose untransposes a matrix if applicable. If a is an Untransposer, then +// untranspose returns the underlying matrix and true. 
If it is not, then it returns +// the input matrix and false. +func untranspose(a Matrix) (Matrix, bool) { + if ut, ok := a.(Untransposer); ok { + return ut.Untranspose(), true + } + return a, false +} + +// untransposeExtract returns an untransposed matrix in a built-in matrix type. +// +// The untransposed matrix is returned unaltered if it is a built-in matrix type. +// Otherwise, if it implements a Raw method, an appropriate built-in type value +// is returned holding the raw matrix value of the input. If neither of these +// is possible, the untransposed matrix is returned. +func untransposeExtract(a Matrix) (Matrix, bool) { + ut, trans := untranspose(a) + switch m := ut.(type) { + case *DiagDense, *SymBandDense, *TriBandDense, *BandDense, *TriDense, *SymDense, *Dense: + return m, trans + // TODO(btracey): Add here if we ever have an equivalent of RawDiagDense. + case RawSymBander: + rsb := m.RawSymBand() + if rsb.Uplo != blas.Upper { + return ut, trans + } + var sb SymBandDense + sb.SetRawSymBand(rsb) + return &sb, trans + case RawTriBander: + rtb := m.RawTriBand() + if rtb.Diag == blas.Unit { + return ut, trans + } + var tb TriBandDense + tb.SetRawTriBand(rtb) + return &tb, trans + case RawBander: + var b BandDense + b.SetRawBand(m.RawBand()) + return &b, trans + case RawTriangular: + rt := m.RawTriangular() + if rt.Diag == blas.Unit { + return ut, trans + } + var t TriDense + t.SetRawTriangular(rt) + return &t, trans + case RawSymmetricer: + rs := m.RawSymmetric() + if rs.Uplo != blas.Upper { + return ut, trans + } + var s SymDense + s.SetRawSymmetric(rs) + return &s, trans + case RawMatrixer: + var d Dense + d.SetRawMatrix(m.RawMatrix()) + return &d, trans + default: + return ut, trans + } +} + +// TODO(btracey): Consider adding CopyCol/CopyRow if the behavior seems useful. +// TODO(btracey): Add in fast paths to Row/Col for the other concrete types +// (TriDense, etc.) as well as relevant interfaces (RowColer, RawRowViewer, etc.) + +// Col copies the elements in the jth column of the matrix into the slice dst. +// The length of the provided slice must equal the number of rows, unless the +// slice is nil in which case a new slice is first allocated. +func Col(dst []float64, j int, a Matrix) []float64 { + r, c := a.Dims() + if j < 0 || j >= c { + panic(ErrColAccess) + } + if dst == nil { + dst = make([]float64, r) + } else { + if len(dst) != r { + panic(ErrColLength) + } + } + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m := rm.RawMatrix() + if aTrans { + copy(dst, m.Data[j*m.Stride:j*m.Stride+m.Cols]) + return dst + } + blas64.Copy(blas64.Vector{N: r, Inc: m.Stride, Data: m.Data[j:]}, + blas64.Vector{N: r, Inc: 1, Data: dst}, + ) + return dst + } + for i := 0; i < r; i++ { + dst[i] = a.At(i, j) + } + return dst +} + +// Row copies the elements in the ith row of the matrix into the slice dst. +// The length of the provided slice must equal the number of columns, unless the +// slice is nil in which case a new slice is first allocated. 
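+//
+// NOTE (editorial sketch, not part of the upstream gonum source): Col above and
+// Row below follow the same dst convention:
+//
+//	a := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})
+//	col := mat.Col(nil, 1, a) // allocates a new slice: col == []float64{2, 5}
+//	row := make([]float64, 3)
+//	mat.Row(row, 0, a) // fills the provided slice: row == []float64{1, 2, 3}
+//	_, _ = col, row
+//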
+func Row(dst []float64, i int, a Matrix) []float64 { + r, c := a.Dims() + if i < 0 || i >= r { + panic(ErrColAccess) + } + if dst == nil { + dst = make([]float64, c) + } else { + if len(dst) != c { + panic(ErrRowLength) + } + } + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m := rm.RawMatrix() + if aTrans { + blas64.Copy(blas64.Vector{N: c, Inc: m.Stride, Data: m.Data[i:]}, + blas64.Vector{N: c, Inc: 1, Data: dst}, + ) + return dst + } + copy(dst, m.Data[i*m.Stride:i*m.Stride+m.Cols]) + return dst + } + for j := 0; j < c; j++ { + dst[j] = a.At(i, j) + } + return dst +} + +// Cond returns the condition number of the given matrix under the given norm. +// The condition number must be based on the 1-norm, 2-norm or ∞-norm. +// Cond will panic with matrix.ErrShape if the matrix has zero size. +// +// BUG(btracey): The computation of the 1-norm and ∞-norm for non-square matrices +// is innacurate, although is typically the right order of magnitude. See +// https://github.com/xianyi/OpenBLAS/issues/636. While the value returned will +// change with the resolution of this bug, the result from Cond will match the +// condition number used internally. +func Cond(a Matrix, norm float64) float64 { + m, n := a.Dims() + if m == 0 || n == 0 { + panic(ErrShape) + } + var lnorm lapack.MatrixNorm + switch norm { + default: + panic("mat: bad norm value") + case 1: + lnorm = lapack.MaxColumnSum + case 2: + var svd SVD + ok := svd.Factorize(a, SVDNone) + if !ok { + return math.Inf(1) + } + return svd.Cond() + case math.Inf(1): + lnorm = lapack.MaxRowSum + } + + if m == n { + // Use the LU decomposition to compute the condition number. + var lu LU + lu.factorize(a, lnorm) + return lu.Cond() + } + if m > n { + // Use the QR factorization to compute the condition number. + var qr QR + qr.factorize(a, lnorm) + return qr.Cond() + } + // Use the LQ factorization to compute the condition number. + var lq LQ + lq.factorize(a, lnorm) + return lq.Cond() +} + +// Det returns the determinant of the matrix a. In many expressions using LogDet +// will be more numerically stable. +func Det(a Matrix) float64 { + det, sign := LogDet(a) + return math.Exp(det) * sign +} + +// Dot returns the sum of the element-wise product of a and b. +// Dot panics if the matrix sizes are unequal. +func Dot(a, b Vector) float64 { + la := a.Len() + lb := b.Len() + if la != lb { + panic(ErrShape) + } + if arv, ok := a.(RawVectorer); ok { + if brv, ok := b.(RawVectorer); ok { + return blas64.Dot(arv.RawVector(), brv.RawVector()) + } + } + var sum float64 + for i := 0; i < la; i++ { + sum += a.At(i, 0) * b.At(i, 0) + } + return sum +} + +// Equal returns whether the matrices a and b have the same size +// and are element-wise equal. 
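+//
+// NOTE (editorial sketch, not part of the upstream gonum source): the
+// package-level helpers above apply to any Matrix or Vector value. The
+// NewVecDense constructor used below is assumed from the rest of the package:
+//
+//	a := mat.NewDense(2, 2, []float64{4, 2, 1, 3})
+//	d := mat.Det(a)      // 4*3 - 2*1 == 10
+//	k1 := mat.Cond(a, 1) // condition number under the 1-norm
+//	k2 := mat.Cond(a, 2) // 2-norm condition number, computed via the SVD
+//
+//	x := mat.NewVecDense(2, []float64{1, 2})
+//	y := mat.NewVecDense(2, []float64{3, 4})
+//	dot := mat.Dot(x, y) // 1*3 + 2*4 == 11
+//	_, _, _, _ = d, k1, k2, dot
+//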
+func Equal(a, b Matrix) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + if rma, ok := aU.(RawMatrixer); ok { + if rmb, ok := bU.(RawMatrixer); ok { + ra := rma.RawMatrix() + rb := rmb.RawMatrix() + if aTrans == bTrans { + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { + return false + } + } + } + return true + } + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[j*rb.Stride+i] { + return false + } + } + } + return true + } + } + if rma, ok := aU.(RawSymmetricer); ok { + if rmb, ok := bU.(RawSymmetricer); ok { + ra := rma.RawSymmetric() + rb := rmb.RawSymmetric() + // Symmetric matrices are always upper and equal to their transpose. + for i := 0; i < ra.N; i++ { + for j := i; j < ra.N; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { + return false + } + } + } + return true + } + } + if ra, ok := aU.(*VecDense); ok { + if rb, ok := bU.(*VecDense); ok { + // If the raw vectors are the same length they must either both be + // transposed or both not transposed (or have length 1). + for i := 0; i < ra.mat.N; i++ { + if ra.mat.Data[i*ra.mat.Inc] != rb.mat.Data[i*rb.mat.Inc] { + return false + } + } + return true + } + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if a.At(i, j) != b.At(i, j) { + return false + } + } + } + return true +} + +// EqualApprox returns whether the matrices a and b have the same size and contain all equal +// elements with tolerance for element-wise equality specified by epsilon. Matrices +// with non-equal shapes are not equal. +func EqualApprox(a, b Matrix, epsilon float64) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + if rma, ok := aU.(RawMatrixer); ok { + if rmb, ok := bU.(RawMatrixer); ok { + ra := rma.RawMatrix() + rb := rmb.RawMatrix() + if aTrans == bTrans { + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { + return false + } + } + } + return true + } + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[j*rb.Stride+i], epsilon, epsilon) { + return false + } + } + } + return true + } + } + if rma, ok := aU.(RawSymmetricer); ok { + if rmb, ok := bU.(RawSymmetricer); ok { + ra := rma.RawSymmetric() + rb := rmb.RawSymmetric() + // Symmetric matrices are always upper and equal to their transpose. + for i := 0; i < ra.N; i++ { + for j := i; j < ra.N; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { + return false + } + } + } + return true + } + } + if ra, ok := aU.(*VecDense); ok { + if rb, ok := bU.(*VecDense); ok { + // If the raw vectors are the same length they must either both be + // transposed or both not transposed (or have length 1). 
+ for i := 0; i < ra.mat.N; i++ { + if !floats.EqualWithinAbsOrRel(ra.mat.Data[i*ra.mat.Inc], rb.mat.Data[i*rb.mat.Inc], epsilon, epsilon) { + return false + } + } + return true + } + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if !floats.EqualWithinAbsOrRel(a.At(i, j), b.At(i, j), epsilon, epsilon) { + return false + } + } + } + return true +} + +// LogDet returns the log of the determinant and the sign of the determinant +// for the matrix that has been factorized. Numerical stability in product and +// division expressions is generally improved by working in log space. +func LogDet(a Matrix) (det float64, sign float64) { + // TODO(btracey): Add specialized routines for TriDense, etc. + var lu LU + lu.Factorize(a) + return lu.LogDet() +} + +// Max returns the largest element value of the matrix A. +// Max will panic with matrix.ErrShape if the matrix has zero size. +func Max(a Matrix) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + // Max(A) = Max(A^T) + aU, _ := untranspose(a) + switch m := aU.(type) { + case RawMatrixer: + rm := m.RawMatrix() + max := math.Inf(-1) + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + if v > max { + max = v + } + } + } + return max + case RawTriangular: + rm := m.RawTriangular() + // The max of a triangular is at least 0 unless the size is 1. + if rm.N == 1 { + return rm.Data[0] + } + max := 0.0 + if rm.Uplo == blas.Upper { + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v > max { + max = v + } + } + } + return max + } + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { + if v > max { + max = v + } + } + } + return max + case RawSymmetricer: + rm := m.RawSymmetric() + if rm.Uplo != blas.Upper { + panic(badSymTriangle) + } + max := math.Inf(-1) + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v > max { + max = v + } + } + } + return max + default: + r, c := aU.Dims() + max := math.Inf(-1) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := aU.At(i, j) + if v > max { + max = v + } + } + } + return max + } +} + +// Min returns the smallest element value of the matrix A. +// Min will panic with matrix.ErrShape if the matrix has zero size. +func Min(a Matrix) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + // Min(A) = Min(A^T) + aU, _ := untranspose(a) + switch m := aU.(type) { + case RawMatrixer: + rm := m.RawMatrix() + min := math.Inf(1) + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + if v < min { + min = v + } + } + } + return min + case RawTriangular: + rm := m.RawTriangular() + // The min of a triangular is at most 0 unless the size is 1. 
+ if rm.N == 1 { + return rm.Data[0] + } + min := 0.0 + if rm.Uplo == blas.Upper { + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v < min { + min = v + } + } + } + return min + } + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { + if v < min { + min = v + } + } + } + return min + case RawSymmetricer: + rm := m.RawSymmetric() + if rm.Uplo != blas.Upper { + panic(badSymTriangle) + } + min := math.Inf(1) + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v < min { + min = v + } + } + } + return min + default: + r, c := aU.Dims() + min := math.Inf(1) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := aU.At(i, j) + if v < min { + min = v + } + } + } + return min + } +} + +// Norm returns the specified (induced) norm of the matrix a. See +// https://en.wikipedia.org/wiki/Matrix_norm for the definition of an induced norm. +// +// Valid norms are: +// 1 - The maximum absolute column sum +// 2 - Frobenius norm, the square root of the sum of the squares of the elements. +// Inf - The maximum absolute row sum. +// Norm will panic with ErrNormOrder if an illegal norm order is specified and +// with matrix.ErrShape if the matrix has zero size. +func Norm(a Matrix, norm float64) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + aU, aTrans := untranspose(a) + var work []float64 + switch rma := aU.(type) { + case RawMatrixer: + rm := rma.RawMatrix() + n := normLapack(norm, aTrans) + if n == lapack.MaxColumnSum { + work = getFloats(rm.Cols, false) + defer putFloats(work) + } + return lapack64.Lange(n, rm, work) + case RawTriangular: + rm := rma.RawTriangular() + n := normLapack(norm, aTrans) + if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { + work = getFloats(rm.N, false) + defer putFloats(work) + } + return lapack64.Lantr(n, rm, work) + case RawSymmetricer: + rm := rma.RawSymmetric() + n := normLapack(norm, aTrans) + if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { + work = getFloats(rm.N, false) + defer putFloats(work) + } + return lapack64.Lansy(n, rm, work) + case *VecDense: + rv := rma.RawVector() + switch norm { + default: + panic("unreachable") + case 1: + if aTrans { + imax := blas64.Iamax(rv) + return math.Abs(rma.At(imax, 0)) + } + return blas64.Asum(rv) + case 2: + return blas64.Nrm2(rv) + case math.Inf(1): + if aTrans { + return blas64.Asum(rv) + } + imax := blas64.Iamax(rv) + return math.Abs(rma.At(imax, 0)) + } + } + switch norm { + default: + panic("unreachable") + case 1: + var max float64 + for j := 0; j < c; j++ { + var sum float64 + for i := 0; i < r; i++ { + sum += math.Abs(a.At(i, j)) + } + if sum > max { + max = sum + } + } + return max + case 2: + var sum float64 + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := a.At(i, j) + sum += v * v + } + } + return math.Sqrt(sum) + case math.Inf(1): + var max float64 + for i := 0; i < r; i++ { + var sum float64 + for j := 0; j < c; j++ { + sum += math.Abs(a.At(i, j)) + } + if sum > max { + max = sum + } + } + return max + } +} + +// normLapack converts the float64 norm input in Norm to a lapack.MatrixNorm. 
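+//
+// NOTE (editorial sketch, not part of the upstream gonum source): the three
+// norm orders accepted by Norm above, evaluated on a concrete matrix:
+//
+//	a := mat.NewDense(2, 2, []float64{1, -2, -3, 4})
+//	n1 := mat.Norm(a, 1)           // maximum absolute column sum: |-2|+|4| == 6
+//	nF := mat.Norm(a, 2)           // Frobenius norm: sqrt(1+4+9+16)
+//	nI := mat.Norm(a, math.Inf(1)) // maximum absolute row sum: |-3|+|4| == 7
+//	_, _, _ = n1, nF, nI
+//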
+func normLapack(norm float64, aTrans bool) lapack.MatrixNorm { + switch norm { + case 1: + n := lapack.MaxColumnSum + if aTrans { + n = lapack.MaxRowSum + } + return n + case 2: + return lapack.Frobenius + case math.Inf(1): + n := lapack.MaxRowSum + if aTrans { + n = lapack.MaxColumnSum + } + return n + default: + panic(ErrNormOrder) + } +} + +// Sum returns the sum of the elements of the matrix. +func Sum(a Matrix) float64 { + // TODO(btracey): Add a fast path for the other supported matrix types. + + r, c := a.Dims() + var sum float64 + aU, _ := untranspose(a) + if rma, ok := aU.(RawMatrixer); ok { + rm := rma.RawMatrix() + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + sum += v + } + } + return sum + } + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + sum += a.At(i, j) + } + } + return sum +} + +// A Tracer can compute the trace of the matrix. Trace must panic if the +// matrix is not square. +type Tracer interface { + Trace() float64 +} + +// Trace returns the trace of the matrix. Trace will panic if the +// matrix is not square. +func Trace(a Matrix) float64 { + m, _ := untransposeExtract(a) + if t, ok := m.(Tracer); ok { + return t.Trace() + } + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + var v float64 + for i := 0; i < r; i++ { + v += a.At(i, i) + } + return v +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// use returns a float64 slice with l elements, using f if it +// has the necessary capacity, otherwise creating a new slice. +func use(f []float64, l int) []float64 { + if l <= cap(f) { + return f[:l] + } + return make([]float64, l) +} + +// useZeroed returns a float64 slice with l elements, using f if it +// has the necessary capacity, otherwise creating a new slice. The +// elements of the returned slice are guaranteed to be zero. +func useZeroed(f []float64, l int) []float64 { + if l <= cap(f) { + f = f[:l] + zero(f) + return f + } + return make([]float64, l) +} + +// zero zeros the given slice's elements. +func zero(f []float64) { + for i := range f { + f[i] = 0 + } +} + +// useInt returns an int slice with l elements, using i if it +// has the necessary capacity, otherwise creating a new slice. +func useInt(i []int, l int) []int { + if l <= cap(i) { + return i[:l] + } + return make([]int, l) +} diff --git a/vendor/gonum.org/v1/gonum/mat/offset.go b/vendor/gonum.org/v1/gonum/mat/offset.go new file mode 100644 index 00000000..af2c03b6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/offset.go @@ -0,0 +1,20 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!safe + +package mat + +import "unsafe" + +// offset returns the number of float64 values b[0] is after a[0]. +func offset(a, b []float64) int { + if &a[0] == &b[0] { + return 0 + } + // This expression must be atomic with respect to GC moves. + // At this stage this is true, because the GC does not + // move. See https://golang.org/issue/12445. 
+ return int(uintptr(unsafe.Pointer(&b[0]))-uintptr(unsafe.Pointer(&a[0]))) / int(unsafe.Sizeof(float64(0))) +} diff --git a/vendor/gonum.org/v1/gonum/mat/offset_appengine.go b/vendor/gonum.org/v1/gonum/mat/offset_appengine.go new file mode 100644 index 00000000..df617478 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/offset_appengine.go @@ -0,0 +1,24 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine safe + +package mat + +import "reflect" + +var sizeOfFloat64 = int(reflect.TypeOf(float64(0)).Size()) + +// offset returns the number of float64 values b[0] is after a[0]. +func offset(a, b []float64) int { + va0 := reflect.ValueOf(a).Index(0) + vb0 := reflect.ValueOf(b).Index(0) + if va0.Addr() == vb0.Addr() { + return 0 + } + // This expression must be atomic with respect to GC moves. + // At this stage this is true, because the GC does not + // move. See https://golang.org/issue/12445. + return int(vb0.UnsafeAddr()-va0.UnsafeAddr()) / sizeOfFloat64 +} diff --git a/vendor/gonum.org/v1/gonum/mat/pool.go b/vendor/gonum.org/v1/gonum/mat/pool.go new file mode 100644 index 00000000..25ca29f1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/pool.go @@ -0,0 +1,236 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var tab64 = [64]byte{ + 0x3f, 0x00, 0x3a, 0x01, 0x3b, 0x2f, 0x35, 0x02, + 0x3c, 0x27, 0x30, 0x1b, 0x36, 0x21, 0x2a, 0x03, + 0x3d, 0x33, 0x25, 0x28, 0x31, 0x12, 0x1c, 0x14, + 0x37, 0x1e, 0x22, 0x0b, 0x2b, 0x0e, 0x16, 0x04, + 0x3e, 0x39, 0x2e, 0x34, 0x26, 0x1a, 0x20, 0x29, + 0x32, 0x24, 0x11, 0x13, 0x1d, 0x0a, 0x0d, 0x15, + 0x38, 0x2d, 0x19, 0x1f, 0x23, 0x10, 0x09, 0x0c, + 0x2c, 0x18, 0x0f, 0x08, 0x17, 0x07, 0x06, 0x05, +} + +// bits returns the ceiling of base 2 log of v. +// Approach based on http://stackoverflow.com/a/11398748. +func bits(v uint64) byte { + if v == 0 { + return 0 + } + v <<= 2 + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v |= v >> 32 + return tab64[((v-(v>>1))*0x07EDD5E59A4E28C2)>>58] - 1 +} + +var ( + // pool contains size stratified workspace Dense pools. + // Each pool element i returns sized matrices with a data + // slice capped at 1< 2. + if !m.IsZero() { + if fr != r { + panic(ErrShape) + } + if _, lc := factors[len(factors)-1].Dims(); lc != c { + panic(ErrShape) + } + } + + dims := make([]int, len(factors)+1) + dims[0] = r + dims[len(dims)-1] = c + pc := fc + for i, f := range factors[1:] { + cr, cc := f.Dims() + dims[i+1] = cr + if pc != cr { + panic(ErrShape) + } + pc = cc + } + + return &multiplier{ + factors: factors, + dims: dims, + table: newTable(len(factors)), + } +} + +// optimize determines an optimal matrix multiply operation order. 
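+//
+// NOTE (editorial worked example, not part of the upstream gonum source): for
+// factor dimensions dims = [10, 100, 5, 50], that is A (10×100), B (100×5) and
+// C (5×50), the two possible orderings cost
+//
+//	(A*B)*C: 10*100*5 + 10*5*50   =  5000 + 2500  =  7500 multiplications
+//	A*(B*C): 100*5*50 + 10*100*50 = 25000 + 50000 = 75000 multiplications
+//
+// so for the full span (0, 2) the table built below records k = 1, splitting
+// the chain between B and C, i.e. (A*B)*C.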
+func (p *multiplier) optimize() { + if debugProductWalk { + fmt.Printf("chain dims: %v\n", p.dims) + } + const maxInt = int(^uint(0) >> 1) + for f := 1; f < len(p.factors); f++ { + for i := 0; i < len(p.factors)-f; i++ { + j := i + f + p.table.set(i, j, entry{cost: maxInt}) + for k := i; k < j; k++ { + cost := p.table.at(i, k).cost + p.table.at(k+1, j).cost + p.dims[i]*p.dims[k+1]*p.dims[j+1] + if cost < p.table.at(i, j).cost { + p.table.set(i, j, entry{cost: cost, k: k}) + } + } + } + } +} + +// multiply walks the optimal operation tree found by optimize, +// leaving the final result in the stack. It returns the +// product, which may be copied but should be returned to +// the workspace pool. +func (p *multiplier) multiply() *Dense { + result, _ := p.multiplySubchain(0, len(p.factors)-1) + if debugProductWalk { + r, c := result.Dims() + fmt.Printf("\tpop result (%d×%d) cost=%d\n", r, c, p.table.at(0, len(p.factors)-1).cost) + } + return result.(*Dense) +} + +func (p *multiplier) multiplySubchain(i, j int) (m Matrix, intermediate bool) { + if i == j { + return p.factors[i], false + } + + a, aTmp := p.multiplySubchain(i, p.table.at(i, j).k) + b, bTmp := p.multiplySubchain(p.table.at(i, j).k+1, j) + + ar, ac := a.Dims() + br, bc := b.Dims() + if ac != br { + // Panic with a string since this + // is not a user-facing panic. + panic(ErrShape.Error()) + } + + if debugProductWalk { + fmt.Printf("\tpush f[%d] (%d×%d)%s * f[%d] (%d×%d)%s\n", + i, ar, ac, result(aTmp), j, br, bc, result(bTmp)) + } + + r := getWorkspace(ar, bc, false) + r.Mul(a, b) + if aTmp { + putWorkspace(a.(*Dense)) + } + if bTmp { + putWorkspace(b.(*Dense)) + } + return r, true +} + +type entry struct { + k int // is the chain subdivision index. + cost int // cost is the cost of the operation. +} + +// table is a row major n×n dynamic programming table. +type table struct { + n int + entries []entry +} + +func newTable(n int) table { + return table{n: n, entries: make([]entry, n*n)} +} + +func (t table) at(i, j int) entry { return t.entries[i*t.n+j] } +func (t table) set(i, j int, e entry) { t.entries[i*t.n+j] = e } + +type result bool + +func (r result) String() string { + if r { + return " (popped result)" + } + return "" +} diff --git a/vendor/gonum.org/v1/gonum/mat/qr.go b/vendor/gonum.org/v1/gonum/mat/qr.go new file mode 100644 index 00000000..bf38ee4d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/qr.go @@ -0,0 +1,260 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const badQR = "mat: invalid QR factorization" + +// QR is a type for creating and using the QR factorization of a matrix. +type QR struct { + qr *Dense + tau []float64 + cond float64 +} + +func (qr *QR) updateCond(norm lapack.MatrixNorm) { + // Since A = Q*R, and Q is orthogonal, we get for the condition number κ + // κ(A) := |A| |A^-1| = |Q*R| |(Q*R)^-1| = |R| |R^-1 * Q^T| + // = |R| |R^-1| = κ(R), + // where we used that fact that Q^-1 = Q^T. However, this assumes that + // the matrix norm is invariant under orthogonal transformations which + // is not the case for CondNorm. Hopefully the error is negligible: κ + // is only a qualitative measure anyway. 
+ n := qr.qr.mat.Cols + work := getFloats(3*n, false) + iwork := getInts(n, false) + r := qr.qr.asTriDense(n, blas.NonUnit, blas.Upper) + v := lapack64.Trcon(norm, r.mat, work, iwork) + putFloats(work) + putInts(iwork) + qr.cond = 1 / v +} + +// Factorize computes the QR factorization of an m×n matrix a where m >= n. The QR +// factorization always exists even if A is singular. +// +// The QR decomposition is a factorization of the matrix A such that A = Q * R. +// The matrix Q is an orthonormal m×m matrix, and R is an m×n upper triangular matrix. +// Q and R can be extracted using the QTo and RTo methods. +func (qr *QR) Factorize(a Matrix) { + qr.factorize(a, CondNorm) +} + +func (qr *QR) factorize(a Matrix, norm lapack.MatrixNorm) { + m, n := a.Dims() + if m < n { + panic(ErrShape) + } + k := min(m, n) + if qr.qr == nil { + qr.qr = &Dense{} + } + qr.qr.Clone(a) + work := []float64{0} + qr.tau = make([]float64, k) + lapack64.Geqrf(qr.qr.mat, qr.tau, work, -1) + + work = getFloats(int(work[0]), false) + lapack64.Geqrf(qr.qr.mat, qr.tau, work, len(work)) + putFloats(work) + qr.updateCond(norm) +} + +// isValid returns whether the receiver contains a factorization. +func (qr *QR) isValid() bool { + return qr.qr != nil && !qr.qr.IsZero() +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a factorization. +func (qr *QR) Cond() float64 { + if !qr.isValid() { + panic(badQR) + } + return qr.cond +} + +// TODO(btracey): Add in the "Reduced" forms for extracting the n×n orthogonal +// and upper triangular matrices. + +// RTo extracts the m×n upper trapezoidal matrix from a QR decomposition. +// If dst is nil, a new matrix is allocated. The resulting dst matrix is returned. +// RTo will panic if the receiver does not contain a factorization. +func (qr *QR) RTo(dst *Dense) *Dense { + if !qr.isValid() { + panic(badQR) + } + + r, c := qr.qr.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + // Disguise the QR as an upper triangular + t := &TriDense{ + mat: blas64.Triangular{ + N: c, + Stride: qr.qr.mat.Stride, + Data: qr.qr.mat.Data, + Uplo: blas.Upper, + Diag: blas.NonUnit, + }, + cap: qr.qr.capCols, + } + dst.Copy(t) + + // Zero below the triangular. + for i := r; i < c; i++ { + zero(dst.mat.Data[i*dst.mat.Stride : i*dst.mat.Stride+c]) + } + + return dst +} + +// QTo extracts the m×m orthonormal matrix Q from a QR decomposition. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +// QTo will panic if the receiver does not contain a factorization. +func (qr *QR) QTo(dst *Dense) *Dense { + if !qr.isValid() { + panic(badQR) + } + + r, _ := qr.qr.Dims() + if dst == nil { + dst = NewDense(r, r, nil) + } else { + dst.reuseAsZeroed(r, r) + } + + // Set Q = I. + for i := 0; i < r*r; i += r + 1 { + dst.mat.Data[i] = 1 + } + + // Construct Q from the elementary reflectors. + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, len(work)) + putFloats(work) + + return dst +} + +// SolveTo finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and b, where A is an m×n matrix represented in its QR factorized +// form. If A is singular or near-singular a Condition error is returned. +// See the documentation for Condition for more information. 
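+//
+// NOTE (editorial sketch, not part of the upstream gonum source): a typical use
+// of the QR type for an overdetermined system, using only the Factorize, QTo,
+// RTo and SolveTo methods defined in this file:
+//
+//	a := mat.NewDense(3, 2, []float64{1, 1, 1, 2, 1, 3}) // m >= n
+//	b := mat.NewDense(3, 1, []float64{6, 0, 0})
+//
+//	var qr mat.QR
+//	qr.Factorize(a)
+//	q := qr.QTo(nil) // 3×3 orthonormal factor
+//	r := qr.RTo(nil) // 3×2 upper-trapezoidal factor
+//	_, _ = q, r
+//
+//	var x mat.Dense // the 2×1 minimizer of ||A*X - B||_2
+//	if err := qr.SolveTo(&x, false, b); err != nil {
+//		// A was singular or badly conditioned; err is a Condition value.
+//	}
+//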
+// +// The minimization problem solved depends on the input parameters. +// If trans == false, find X such that ||A*X - B||_2 is minimized. +// If trans == true, find the minimum norm solution of A^T * X = B. +// The solution matrix, X, is stored in place into dst. +// SolveTo will panic if the receiver does not contain a factorization. +func (qr *QR) SolveTo(dst *Dense, trans bool, b Matrix) error { + if !qr.isValid() { + panic(badQR) + } + + r, c := qr.qr.Dims() + br, bc := b.Dims() + + // The QR solve algorithm stores the result in-place into the right hand side. + // The storage for the answer must be large enough to hold both b and x. + // However, this method's receiver must be the size of x. Copy b, and then + // copy the result into m at the end. + if trans { + if c != br { + panic(ErrShape) + } + dst.reuseAs(r, bc) + } else { + if r != br { + panic(ErrShape) + } + dst.reuseAs(c, bc) + } + // Do not need to worry about overlap between m and b because x has its own + // independent storage. + w := getWorkspace(max(r, c), bc, false) + w.Copy(b) + t := qr.qr.asTriDense(qr.qr.mat.Cols, blas.NonUnit, blas.Upper).mat + if trans { + ok := lapack64.Trtrs(blas.Trans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + for i := c; i < r; i++ { + zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) + } + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, len(work)) + putFloats(work) + } else { + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, len(work)) + putFloats(work) + + ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + } + // X was set above to be the correct size for the result. + dst.Copy(w) + putWorkspace(w) + if qr.cond > ConditionTolerance { + return Condition(qr.cond) + } + return nil +} + +// SolveVecTo finds a minimum-norm solution to a system of linear equations, +// Ax = b. +// See QR.SolveTo for the full documentation. +// SolveVecTo will panic if the receiver does not contain a factorization. +func (qr *QR) SolveVecTo(dst *VecDense, trans bool, b Vector) error { + if !qr.isValid() { + panic(badQR) + } + + r, c := qr.qr.Dims() + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. + bm := Matrix(b) + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if dst != b { + dst.checkOverlap(bmat) + } + b := VecDense{mat: bmat} + bm = b.asDense() + } + if trans { + dst.reuseAs(r) + } else { + dst.reuseAs(c) + } + return qr.SolveTo(dst.asDense(), trans, bm) + +} diff --git a/vendor/gonum.org/v1/gonum/mat/shadow.go b/vendor/gonum.org/v1/gonum/mat/shadow.go new file mode 100644 index 00000000..cc62e44f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/shadow.go @@ -0,0 +1,226 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" +) + +const ( + // regionOverlap is the panic string used for the general case + // of a matrix region overlap between a source and destination. 
+ regionOverlap = "mat: bad region: overlap" + + // regionIdentity is the panic string used for the specific + // case of complete agreement between a source and a destination. + regionIdentity = "mat: bad region: identical" + + // mismatchedStrides is the panic string used for overlapping + // data slices with differing strides. + mismatchedStrides = "mat: bad region: different strides" +) + +// checkOverlap returns false if the receiver does not overlap data elements +// referenced by the parameter and panics otherwise. +// +// checkOverlap methods return a boolean to allow the check call to be added to a +// boolean expression, making use of short-circuit operators. +func checkOverlap(a, b blas64.General) bool { + if cap(a.Data) == 0 || cap(b.Data) == 0 { + return false + } + + off := offset(a.Data[:1], b.Data[:1]) + + if off == 0 { + // At least one element overlaps. + if a.Cols == b.Cols && a.Rows == b.Rows && a.Stride == b.Stride { + panic(regionIdentity) + } + panic(regionOverlap) + } + + if off > 0 && len(a.Data) <= off { + // We know a is completely before b. + return false + } + if off < 0 && len(b.Data) <= -off { + // We know a is completely after b. + return false + } + + if a.Stride != b.Stride { + // Too hard, so assume the worst. + panic(mismatchedStrides) + } + + if off < 0 { + off = -off + a.Cols, b.Cols = b.Cols, a.Cols + } + if rectanglesOverlap(off, a.Cols, b.Cols, a.Stride) { + panic(regionOverlap) + } + return false +} + +func (m *Dense) checkOverlap(a blas64.General) bool { + return checkOverlap(m.RawMatrix(), a) +} + +func (m *Dense) checkOverlapMatrix(a Matrix) bool { + if m == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return m.checkOverlap(amat) +} + +func (s *SymDense) checkOverlap(a blas64.General) bool { + return checkOverlap(generalFromSymmetric(s.RawSymmetric()), a) +} + +func (s *SymDense) checkOverlapMatrix(a Matrix) bool { + if s == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return s.checkOverlap(amat) +} + +// generalFromSymmetric returns a blas64.General with the backing +// data and dimensions of a. +func generalFromSymmetric(a blas64.Symmetric) blas64.General { + return blas64.General{ + Rows: a.N, + Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } +} + +func (t *TriDense) checkOverlap(a blas64.General) bool { + return checkOverlap(generalFromTriangular(t.RawTriangular()), a) +} + +func (t *TriDense) checkOverlapMatrix(a Matrix) bool { + if t == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return t.checkOverlap(amat) +} + +// generalFromTriangular returns a blas64.General with the backing +// data and dimensions of a. 
+func generalFromTriangular(a blas64.Triangular) blas64.General { + return blas64.General{ + Rows: a.N, + Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } +} + +func (v *VecDense) checkOverlap(a blas64.Vector) bool { + mat := v.mat + if cap(mat.Data) == 0 || cap(a.Data) == 0 { + return false + } + + off := offset(mat.Data[:1], a.Data[:1]) + + if off == 0 { + // At least one element overlaps. + if mat.Inc == a.Inc && len(mat.Data) == len(a.Data) { + panic(regionIdentity) + } + panic(regionOverlap) + } + + if off > 0 && len(mat.Data) <= off { + // We know v is completely before a. + return false + } + if off < 0 && len(a.Data) <= -off { + // We know v is completely after a. + return false + } + + if mat.Inc != a.Inc { + // Too hard, so assume the worst. + panic(mismatchedStrides) + } + + if mat.Inc == 1 || off&mat.Inc == 0 { + panic(regionOverlap) + } + return false +} + +// rectanglesOverlap returns whether the strided rectangles a and b overlap +// when b is offset by off elements after a but has at least one element before +// the end of a. off must be positive. a and b have aCols and bCols respectively. +// +// rectanglesOverlap works by shifting both matrices left such that the left +// column of a is at 0. The column indexes are flattened by obtaining the shifted +// relative left and right column positions modulo the common stride. This allows +// direct comparison of the column offsets when the matrix backing data slices +// are known to overlap. +func rectanglesOverlap(off, aCols, bCols, stride int) bool { + if stride == 1 { + // Unit stride means overlapping data + // slices must overlap as matrices. + return true + } + + // Flatten the shifted matrix column positions + // so a starts at 0, modulo the common stride. + aTo := aCols + // The mod stride operations here make the from + // and to indexes comparable between a and b when + // the data slices of a and b overlap. + bFrom := off % stride + bTo := (bFrom + bCols) % stride + + if bTo == 0 || bFrom < bTo { + // b matrix is not wrapped: compare for + // simple overlap. + return bFrom < aTo + } + + // b strictly wraps and so must overlap with a. + return true +} diff --git a/vendor/gonum.org/v1/gonum/mat/solve.go b/vendor/gonum.org/v1/gonum/mat/solve.go new file mode 100644 index 00000000..11813280 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/solve.go @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Solve finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and B. If A is singular or near-singular, a Condition error +// is returned. See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters: +// - if m >= n, find X such that ||A*X - B||_2 is minimized, +// - if m < n, find the minimum norm solution of A * X = B. +// The solution matrix, X, is stored in-place into the receiver. +func (m *Dense) Solve(a, b Matrix) error { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br { + panic(ErrShape) + } + m.reuseAs(ac, bc) + + // TODO(btracey): Add special cases for SymDense, etc. 
+ aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + switch rma := aU.(type) { + case RawTriangular: + side := blas.Left + tA := blas.NoTrans + if aTrans { + tA = blas.Trans + } + + switch rm := bU.(type) { + case RawMatrixer: + if m != bU || bTrans { + if m == bU || m.checkOverlap(rm.RawMatrix()) { + tmp := getWorkspace(br, bc, false) + tmp.Copy(b) + m.Copy(tmp) + putWorkspace(tmp) + break + } + m.Copy(b) + } + default: + if m != bU { + m.Copy(b) + } else if bTrans { + // m and b share data so Copy cannot be used directly. + tmp := getWorkspace(br, bc, false) + tmp.Copy(b) + m.Copy(tmp) + putWorkspace(tmp) + } + } + + rm := rma.RawTriangular() + blas64.Trsm(side, tA, 1, rm, m.mat) + work := getFloats(3*rm.N, false) + iwork := getInts(rm.N, false) + cond := lapack64.Trcon(CondNorm, rm, work, iwork) + putFloats(work) + putInts(iwork) + if cond > ConditionTolerance { + return Condition(cond) + } + return nil + } + + switch { + case ar == ac: + if a == b { + // x = I. + if ar == 1 { + m.mat.Data[0] = 1 + return nil + } + for i := 0; i < ar; i++ { + v := m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+ac] + zero(v) + v[i] = 1 + } + return nil + } + var lu LU + lu.Factorize(a) + return lu.SolveTo(m, false, b) + case ar > ac: + var qr QR + qr.Factorize(a) + return qr.SolveTo(m, false, b) + default: + var lq LQ + lq.Factorize(a) + return lq.SolveTo(m, false, b) + } +} + +// SolveVec finds a minimum-norm solution to a system of linear equations defined +// by the matrix a and the right-hand side column vector b. If A is singular or +// near-singular, a Condition error is returned. See the documentation for +// Dense.Solve for more information. +func (v *VecDense) SolveVec(a Matrix, b Vector) error { + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + _, c := a.Dims() + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. + + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + v.reuseAs(c) + m := v.asDense() + // We conditionally create bm as m when b and v are identical + // to prevent the overlap detection code from identifying m + // and bm as overlapping but not identical. + bm := m + if v != b { + b := VecDense{mat: bmat} + bm = b.asDense() + } + return m.Solve(a, bm) + } + + v.reuseAs(c) + m := v.asDense() + return m.Solve(a, b) +} diff --git a/vendor/gonum.org/v1/gonum/mat/svd.go b/vendor/gonum.org/v1/gonum/mat/svd.go new file mode 100644 index 00000000..2f55c411 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/svd.go @@ -0,0 +1,247 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// SVD is a type for creating and using the Singular Value Decomposition (SVD) +// of a matrix. +type SVD struct { + kind SVDKind + + s []float64 + u blas64.General + vt blas64.General +} + +// SVDKind specifies the treatment of singular vectors during an SVD +// factorization. +type SVDKind int + +const ( + // SVDNone specifies that no singular vectors should be computed during + // the decomposition. + SVDNone SVDKind = 0 + + // SVDThinU specifies the thin decomposition for U should be computed. + SVDThinU SVDKind = 1 << (iota - 1) + // SVDFullU specifies the full decomposition for U should be computed. 
+ SVDFullU + // SVDThinV specifies the thin decomposition for V should be computed. + SVDThinV + // SVDFullV specifies the full decomposition for V should be computed. + SVDFullV + + // SVDThin is a convenience value for computing both thin vectors. + SVDThin SVDKind = SVDThinU | SVDThinV + // SVDThin is a convenience value for computing both full vectors. + SVDFull SVDKind = SVDFullU | SVDFullV +) + +// succFact returns whether the receiver contains a successful factorization. +func (svd *SVD) succFact() bool { + return len(svd.s) != 0 +} + +// Factorize computes the singular value decomposition (SVD) of the input matrix A. +// The singular values of A are computed in all cases, while the singular +// vectors are optionally computed depending on the input kind. +// +// The full singular value decomposition (kind == SVDFull) is a factorization +// of an m×n matrix A of the form +// A = U * Σ * V^T +// where Σ is an m×n diagonal matrix, U is an m×m orthogonal matrix, and V is an +// n×n orthogonal matrix. The diagonal elements of Σ are the singular values of A. +// The first min(m,n) columns of U and V are, respectively, the left and right +// singular vectors of A. +// +// Significant storage space can be saved by using the thin representation of +// the SVD (kind == SVDThin) instead of the full SVD, especially if +// m >> n or m << n. The thin SVD finds +// A = U~ * Σ * V~^T +// where U~ is of size m×min(m,n), Σ is a diagonal matrix of size min(m,n)×min(m,n) +// and V~ is of size n×min(m,n). +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. +func (svd *SVD) Factorize(a Matrix, kind SVDKind) (ok bool) { + // kill previous factorization + svd.s = svd.s[:0] + svd.kind = kind + + m, n := a.Dims() + var jobU, jobVT lapack.SVDJob + + // TODO(btracey): This code should be modified to have the smaller + // matrix written in-place into aCopy when the lapack/native/dgesvd + // implementation is complete. + switch { + case kind&SVDFullU != 0: + jobU = lapack.SVDAll + svd.u = blas64.General{ + Rows: m, + Cols: m, + Stride: m, + Data: use(svd.u.Data, m*m), + } + case kind&SVDThinU != 0: + jobU = lapack.SVDStore + svd.u = blas64.General{ + Rows: m, + Cols: min(m, n), + Stride: min(m, n), + Data: use(svd.u.Data, m*min(m, n)), + } + default: + jobU = lapack.SVDNone + } + switch { + case kind&SVDFullV != 0: + svd.vt = blas64.General{ + Rows: n, + Cols: n, + Stride: n, + Data: use(svd.vt.Data, n*n), + } + jobVT = lapack.SVDAll + case kind&SVDThinV != 0: + svd.vt = blas64.General{ + Rows: min(m, n), + Cols: n, + Stride: n, + Data: use(svd.vt.Data, min(m, n)*n), + } + jobVT = lapack.SVDStore + default: + jobVT = lapack.SVDNone + } + + // A is destroyed on call, so copy the matrix. + aCopy := DenseCopyOf(a) + svd.kind = kind + svd.s = use(svd.s, min(m, n)) + + work := []float64{0} + lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, -1) + work = getFloats(int(work[0]), false) + ok = lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, len(work)) + putFloats(work) + if !ok { + svd.kind = 0 + } + return ok +} + +// Kind returns the SVDKind of the decomposition. If no decomposition has been +// computed, Kind returns -1. +func (svd *SVD) Kind() SVDKind { + if !svd.succFact() { + return -1 + } + return svd.kind +} + +// Cond returns the 2-norm condition number for the factorized matrix. Cond will +// panic if the receiver does not contain a successful factorization. 
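+//
+// NOTE (editorial sketch, not part of the upstream gonum source): a typical use
+// of the SVD type, requesting the thin singular vectors:
+//
+//	a := mat.NewDense(4, 3, []float64{
+//		1, 0, 0,
+//		0, 2, 0,
+//		0, 0, 3,
+//		1, 1, 1,
+//	})
+//	var svd mat.SVD
+//	if ok := svd.Factorize(a, mat.SVDThin); !ok {
+//		// the factorization failed to converge
+//	}
+//	cond := svd.Cond()   // largest singular value divided by the smallest
+//	s := svd.Values(nil) // singular values in descending order
+//	_, _ = cond, s
+//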
+func (svd *SVD) Cond() float64 { + if !svd.succFact() { + panic(badFact) + } + return svd.s[0] / svd.s[len(svd.s)-1] +} + +// Values returns the singular values of the factorized matrix in descending order. +// +// If the input slice is non-nil, the values will be stored in-place into +// the slice. In this case, the slice must have length min(m,n), and Values will +// panic with ErrSliceLengthMismatch otherwise. If the input slice is nil, a new +// slice of the appropriate length will be allocated and returned. +// +// Values will panic if the receiver does not contain a successful factorization. +func (svd *SVD) Values(s []float64) []float64 { + if !svd.succFact() { + panic(badFact) + } + if s == nil { + s = make([]float64, len(svd.s)) + } + if len(s) != len(svd.s) { + panic(ErrSliceLengthMismatch) + } + copy(s, svd.s) + return s +} + +// UTo extracts the matrix U from the singular value decomposition. The first +// min(m,n) columns are the left singular vectors and correspond to the singular +// values as returned from SVD.Values. +// +// If dst is not nil, U is stored in-place into dst, and dst must have size +// m×m if the full U was computed, size m×min(m,n) if the thin U was computed, +// and UTo panics otherwise. If dst is nil, a new matrix of the appropriate size +// is allocated and returned. +func (svd *SVD) UTo(dst *Dense) *Dense { + if !svd.succFact() { + panic(badFact) + } + kind := svd.kind + if kind&SVDThinU == 0 && kind&SVDFullU == 0 { + panic("svd: u not computed during factorization") + } + r := svd.u.Rows + c := svd.u.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: svd.u, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + + return dst +} + +// VTo extracts the matrix V from the singular value decomposition. The first +// min(m,n) columns are the right singular vectors and correspond to the singular +// values as returned from SVD.Values. +// +// If dst is not nil, V is stored in-place into dst, and dst must have size +// n×n if the full V was computed, size n×min(m,n) if the thin V was computed, +// and VTo panics otherwise. If dst is nil, a new matrix of the appropriate size +// is allocated and returned. +func (svd *SVD) VTo(dst *Dense) *Dense { + if !svd.succFact() { + panic(badFact) + } + kind := svd.kind + if kind&SVDThinU == 0 && kind&SVDFullV == 0 { + panic("svd: v not computed during factorization") + } + r := svd.vt.Rows + c := svd.vt.Cols + if dst == nil { + dst = NewDense(c, r, nil) + } else { + dst.reuseAs(c, r) + } + + tmp := &Dense{ + mat: svd.vt, + capRows: r, + capCols: c, + } + dst.Copy(tmp.T()) + + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/symband.go b/vendor/gonum.org/v1/gonum/mat/symband.go new file mode 100644 index 00000000..add9a807 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/symband.go @@ -0,0 +1,221 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + symBandDense *SymBandDense + _ Matrix = symBandDense + _ Symmetric = symBandDense + _ Banded = symBandDense + _ SymBanded = symBandDense + _ RawSymBander = symBandDense + _ MutableSymBanded = symBandDense + + _ NonZeroDoer = symBandDense + _ RowNonZeroDoer = symBandDense + _ ColNonZeroDoer = symBandDense +) + +// SymBandDense represents a symmetric band matrix in dense storage format. 
+type SymBandDense struct { + mat blas64.SymmetricBand +} + +// SymBanded is a symmetric band matrix interface type. +type SymBanded interface { + Banded + + // Symmetric returns the number of rows/columns in the matrix. + Symmetric() int + + // SymBand returns the number of rows/columns in the matrix, and the size of + // the bandwidth. + SymBand() (n, k int) +} + +// MutableSymBanded is a symmetric band matrix interface type that allows elements +// to be altered. +type MutableSymBanded interface { + SymBanded + SetSymBand(i, j int, v float64) +} + +// A RawSymBander can return a blas64.SymmetricBand representation of the receiver. +// Changes to the blas64.SymmetricBand.Data slice will be reflected in the original +// matrix, changes to the N, K, Stride and Uplo fields will not. +type RawSymBander interface { + RawSymBand() blas64.SymmetricBand +} + +// NewSymBandDense creates a new SymBand matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*(k+1), +// data is used as the backing slice, and changes to the elements of the returned +// SymBandDense will be reflected in data. If neither of these is true, NewSymBandDense +// will panic. k must be at least zero and less than n, otherwise NewSymBandDense will panic. +// +// The data must be arranged in row-major order constructed by removing the zeros +// from the rows outside the band and aligning the diagonals. SymBandDense matrices +// are stored in the upper triangle. For example, the matrix +// 1 2 3 0 0 0 +// 2 4 5 6 0 0 +// 3 5 7 8 9 0 +// 0 6 8 10 11 12 +// 0 0 9 11 13 14 +// 0 0 0 12 14 15 +// becomes (* entries are never accessed) +// 1 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 * +// 15 * * +// which is passed to NewSymBandDense as []float64{1, 2, ..., 15, *, *, *} with k=2. +// Only the values in the band portion of the matrix are used. +func NewSymBandDense(n, k int, data []float64) *SymBandDense { + if n <= 0 || k < 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if k+1 > n { + panic("mat: band out of range") + } + bc := k + 1 + if data != nil && len(data) != n*bc { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*bc) + } + return &SymBandDense{ + mat: blas64.SymmetricBand{ + N: n, + K: k, + Stride: bc, + Uplo: blas.Upper, + Data: data, + }, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (s *SymBandDense) Dims() (r, c int) { + return s.mat.N, s.mat.N +} + +// Symmetric returns the size of the receiver. +func (s *SymBandDense) Symmetric() int { + return s.mat.N +} + +// Bandwidth returns the bandwidths of the matrix. +func (s *SymBandDense) Bandwidth() (kl, ku int) { + return s.mat.K, s.mat.K +} + +// SymBand returns the number of rows/columns in the matrix, and the size of +// the bandwidth. +func (s *SymBandDense) SymBand() (n, k int) { + return s.mat.N, s.mat.K +} + +// T implements the Matrix interface. Symmetric matrices, by definition, are +// equal to their transpose, and this is a no-op. +func (s *SymBandDense) T() Matrix { + return s +} + +// TBand implements the Banded interface. +func (s *SymBandDense) TBand() Banded { + return s +} + +// RawSymBand returns the underlying blas64.SymBand used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.SymBand. 
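+//
+// NOTE (editorial sketch, not part of the upstream gonum source): constructing
+// the 6×6, k = 2 example from the NewSymBandDense documentation above. The
+// entries marked * there are never read, so zeros are used in their place:
+//
+//	data := []float64{
+//		1, 2, 3,
+//		4, 5, 6,
+//		7, 8, 9,
+//		10, 11, 12,
+//		13, 14, 0,
+//		15, 0, 0,
+//	}
+//	s := mat.NewSymBandDense(6, 2, data)
+//	v := s.At(3, 1) // == 6, mirrored from the stored element at (1, 3)
+//	_ = v
+//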
+func (s *SymBandDense) RawSymBand() blas64.SymmetricBand { + return s.mat +} + +// SetRawSymBand sets the underlying blas64.SymmetricBand used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied SymmetricBand must use blas.Upper storage format. +func (s *SymBandDense) SetRawSymBand(mat blas64.SymmetricBand) { + if mat.Uplo != blas.Upper { + panic("mat: blas64.SymmetricBand does not have blas.Upper storage") + } + s.mat = mat +} + +// Zero sets all of the matrix elements to zero. +func (s *SymBandDense) Zero() { + for i := 0; i < s.mat.N; i++ { + u := min(1+s.mat.K, s.mat.N-i) + zero(s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+u]) + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (s *SymBandDense) DiagView() Diagonal { + n := s.mat.N + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: s.mat.Stride, + Data: s.mat.Data[:(n-1)*s.mat.Stride+1], + }, + } +} + +// DoNonZero calls the function fn for each of the non-zero elements of s. The function fn +// takes a row/column index and the element value of s at (i, j). +func (s *SymBandDense) DoNonZero(fn func(i, j int, v float64)) { + for i := 0; i < s.mat.N; i++ { + for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of s. The function fn +// takes a row/column index and the element value of s at (i, j). +func (s *SymBandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || s.mat.N <= i { + panic(ErrRowAccess) + } + for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of s. The function fn +// takes a row/column index and the element value of s at (i, j). +func (s *SymBandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || s.mat.N <= j { + panic(ErrColAccess) + } + for i := 0; i < s.mat.N; i++ { + if i-s.mat.K <= j && j < i+s.mat.K+1 { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/symmetric.go b/vendor/gonum.org/v1/gonum/mat/symmetric.go new file mode 100644 index 00000000..2ea5bdb0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/symmetric.go @@ -0,0 +1,602 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + symDense *SymDense + + _ Matrix = symDense + _ Symmetric = symDense + _ RawSymmetricer = symDense + _ MutableSymmetric = symDense +) + +const ( + badSymTriangle = "mat: blas64.Symmetric not upper" + badSymCap = "mat: bad capacity for SymDense" +) + +// SymDense is a symmetric matrix that uses dense storage. SymDense +// matrices are stored in the upper triangle. +type SymDense struct { + mat blas64.Symmetric + cap int +} + +// Symmetric represents a symmetric matrix (where the element at {i, j} equals +// the element at {j, i}). Symmetric matrices are always square. +type Symmetric interface { + Matrix + // Symmetric returns the number of rows/columns in the matrix. + Symmetric() int +} + +// A RawSymmetricer can return a view of itself as a BLAS Symmetric matrix. 
+type RawSymmetricer interface { + RawSymmetric() blas64.Symmetric +} + +// A MutableSymmetric can set elements of a symmetric matrix. +type MutableSymmetric interface { + Symmetric + SetSym(i, j int, v float64) +} + +// NewSymDense creates a new Symmetric matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*n, data is +// used as the backing slice, and changes to the elements of the returned SymDense +// will be reflected in data. If neither of these is true, NewSymDense will panic. +// NewSymDense will panic if n is zero. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +// Only the values in the upper triangular portion of the matrix are used. +func NewSymDense(n int, data []float64) *SymDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && n*n != len(data) { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*n) + } + return &SymDense{ + mat: blas64.Symmetric{ + N: n, + Stride: n, + Data: data, + Uplo: blas.Upper, + }, + cap: n, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (s *SymDense) Dims() (r, c int) { + return s.mat.N, s.mat.N +} + +// Caps returns the number of rows and columns in the backing matrix. +func (s *SymDense) Caps() (r, c int) { + return s.cap, s.cap +} + +// T returns the receiver, the transpose of a symmetric matrix. +func (s *SymDense) T() Matrix { + return s +} + +// Symmetric implements the Symmetric interface and returns the number of rows +// and columns in the matrix. +func (s *SymDense) Symmetric() int { + return s.mat.N +} + +// RawSymmetric returns the matrix as a blas64.Symmetric. The returned +// value must be stored in upper triangular format. +func (s *SymDense) RawSymmetric() blas64.Symmetric { + return s.mat +} + +// SetRawSymmetric sets the underlying blas64.Symmetric used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied Symmetric must use blas.Upper storage format. +func (s *SymDense) SetRawSymmetric(mat blas64.Symmetric) { + if mat.Uplo != blas.Upper { + panic(badSymTriangle) + } + s.mat = mat +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (s *SymDense) Reset() { + // N and Stride must be zeroed in unison. + s.mat.N, s.mat.Stride = 0, 0 + s.mat.Data = s.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (s *SymDense) Zero() { + for i := 0; i < s.mat.N; i++ { + zero(s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+s.mat.N]) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. SymDense matrices can be zeroed using Reset. +func (s *SymDense) IsZero() bool { + // It must be the case that m.Dims() returns + // zeros in this case. See comment in Reset(). + return s.mat.N == 0 +} + +// reuseAs resizes an empty matrix to a n×n matrix, +// or checks that a non-empty matrix is n×n. 
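A small sketch of the SymDense constructor and the MutableSymmetric interface described above; SetSym and At are implemented elsewhere in the package. Because only the upper triangle is stored, writing {0,2} is also observed at {2,0}.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// Only the upper triangle of the backing data is used.
	s := mat.NewSymDense(3, []float64{
		4, 1, 2,
		0, 5, 3, // the leading 0 sits in the lower triangle and is ignored
		0, 0, 6,
	})

	// SetSym writes one logical element; both {0,2} and {2,0} observe it.
	s.SetSym(0, 2, 7)
	fmt.Println(s.At(0, 2), s.At(2, 0)) // 7 7

	// Symmetric reports the order of the (square) matrix.
	fmt.Println(s.Symmetric()) // 3
}
```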
+func (s *SymDense) reuseAs(n int) { + if n == 0 { + panic(ErrZeroLength) + } + if s.mat.N > s.cap { + panic(badSymCap) + } + if s.IsZero() { + s.mat = blas64.Symmetric{ + N: n, + Stride: n, + Data: use(s.mat.Data, n*n), + Uplo: blas.Upper, + } + s.cap = n + return + } + if s.mat.Uplo != blas.Upper { + panic(badSymTriangle) + } + if s.mat.N != n { + panic(ErrShape) + } +} + +func (s *SymDense) isolatedWorkspace(a Symmetric) (w *SymDense, restore func()) { + n := a.Symmetric() + if n == 0 { + panic(ErrZeroLength) + } + w = getWorkspaceSym(n, false) + return w, func() { + s.CopySym(w) + putWorkspaceSym(w) + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (s *SymDense) DiagView() Diagonal { + n := s.mat.N + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: s.mat.Stride + 1, + Data: s.mat.Data[:(n-1)*s.mat.Stride+n], + }, + } +} + +func (s *SymDense) AddSym(a, b Symmetric) { + n := a.Symmetric() + if n != b.Symmetric() { + panic(ErrShape) + } + s.reuseAs(n) + + if a, ok := a.(RawSymmetricer); ok { + if b, ok := b.(RawSymmetricer); ok { + amat, bmat := a.RawSymmetric(), b.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(amat)) + } + if s != b { + s.checkOverlap(generalFromSymmetric(bmat)) + } + for i := 0; i < n; i++ { + btmp := bmat.Data[i*bmat.Stride+i : i*bmat.Stride+n] + stmp := s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+n] + for j, v := range amat.Data[i*amat.Stride+i : i*amat.Stride+n] { + stmp[j] = v + btmp[j] + } + } + return + } + } + + s.checkOverlapMatrix(a) + s.checkOverlapMatrix(b) + for i := 0; i < n; i++ { + stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + for j := i; j < n; j++ { + stmp[j] = a.At(i, j) + b.At(i, j) + } + } +} + +func (s *SymDense) CopySym(a Symmetric) int { + n := a.Symmetric() + n = min(n, s.mat.N) + if n == 0 { + return 0 + } + switch a := a.(type) { + case RawSymmetricer: + amat := a.RawSymmetric() + if amat.Uplo != blas.Upper { + panic(badSymTriangle) + } + for i := 0; i < n; i++ { + copy(s.mat.Data[i*s.mat.Stride+i:i*s.mat.Stride+n], amat.Data[i*amat.Stride+i:i*amat.Stride+n]) + } + default: + for i := 0; i < n; i++ { + stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + for j := i; j < n; j++ { + stmp[j] = a.At(i, j) + } + } + } + return n +} + +// SymRankOne performs a symetric rank-one update to the matrix a and stores +// the result in the receiver +// s = a + alpha * x * x' +func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x Vector) { + n, c := x.Dims() + if a.Symmetric() != n || c != 1 { + panic(ErrShape) + } + s.reuseAs(n) + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.CopySym(a) + } + + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat := rv.RawVector() + s.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + blas64.Syr(alpha, xmat, s.mat) + return + } + + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.set(i, j, s.at(i, j)+alpha*x.AtVec(i)*x.AtVec(j)) + } + } +} + +// SymRankK performs a symmetric rank-k update to the matrix a and stores the +// result into the receiver. If a is zero, see SymOuterK. 
+// s = a + alpha * x * x' +func (s *SymDense) SymRankK(a Symmetric, alpha float64, x Matrix) { + n := a.Symmetric() + r, _ := x.Dims() + if r != n { + panic(ErrShape) + } + xMat, aTrans := untranspose(x) + var g blas64.General + if rm, ok := xMat.(RawMatrixer); ok { + g = rm.RawMatrix() + } else { + g = DenseCopyOf(x).mat + aTrans = false + } + if a != s { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.reuseAs(n) + s.CopySym(a) + } + t := blas.NoTrans + if aTrans { + t = blas.Trans + } + blas64.Syrk(t, alpha, g, 1, s.mat) +} + +// SymOuterK calculates the outer product of x with itself and stores +// the result into the receiver. It is equivalent to the matrix +// multiplication +// s = alpha * x * x'. +// In order to update an existing matrix, see SymRankOne. +func (s *SymDense) SymOuterK(alpha float64, x Matrix) { + n, _ := x.Dims() + switch { + case s.IsZero(): + s.mat = blas64.Symmetric{ + N: n, + Stride: n, + Data: useZeroed(s.mat.Data, n*n), + Uplo: blas.Upper, + } + s.cap = n + s.SymRankK(s, alpha, x) + case s.mat.Uplo != blas.Upper: + panic(badSymTriangle) + case s.mat.N == n: + if s == x { + w := getWorkspaceSym(n, true) + w.SymRankK(w, alpha, x) + s.CopySym(w) + putWorkspaceSym(w) + } else { + switch r := x.(type) { + case RawMatrixer: + s.checkOverlap(r.RawMatrix()) + case RawSymmetricer: + s.checkOverlap(generalFromSymmetric(r.RawSymmetric())) + case RawTriangular: + s.checkOverlap(generalFromTriangular(r.RawTriangular())) + } + // Only zero the upper triangle. + for i := 0; i < n; i++ { + ri := i * s.mat.Stride + zero(s.mat.Data[ri+i : ri+n]) + } + s.SymRankK(s, alpha, x) + } + default: + panic(ErrShape) + } +} + +// RankTwo performs a symmmetric rank-two update to the matrix a and stores +// the result in the receiver +// m = a + alpha * (x * y' + y * x') +func (s *SymDense) RankTwo(a Symmetric, alpha float64, x, y Vector) { + n := s.mat.N + xr, xc := x.Dims() + if xr != n || xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yr != n || yc != 1 { + panic(ErrShape) + } + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + s.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + s.checkOverlap((&VecDense{mat: ymat}).asGeneral()) + } else { + fast = false + } + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.reuseAs(n) + s.CopySym(a) + } + + if fast { + if s != a { + s.reuseAs(n) + s.CopySym(a) + } + blas64.Syr2(alpha, xmat, ymat, s.mat) + return + } + + for i := 0; i < n; i++ { + s.reuseAs(n) + for j := i; j < n; j++ { + s.set(i, j, a.At(i, j)+alpha*(x.AtVec(i)*y.AtVec(j)+y.AtVec(i)*x.AtVec(j))) + } + } +} + +// ScaleSym multiplies the elements of a by f, placing the result in the receiver. 
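To make the rank-update formulas above concrete (for SymRankOne, s = a + alpha * x * x'), here is a minimal sketch with arbitrary numbers.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	a := mat.NewSymDense(2, []float64{
		2, 1,
		0, 3,
	})
	x := mat.NewVecDense(2, []float64{1, 2})

	// s = a + 0.5 * x * x', i.e. element {i,j} gains 0.5*x[i]*x[j].
	var s mat.SymDense
	s.SymRankOne(a, 0.5, x)

	fmt.Println(s.At(0, 0)) // 2 + 0.5*1*1 = 2.5
	fmt.Println(s.At(0, 1)) // 1 + 0.5*1*2 = 2
	fmt.Println(s.At(1, 1)) // 3 + 0.5*2*2 = 5
}
```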
+func (s *SymDense) ScaleSym(f float64, a Symmetric) { + n := a.Symmetric() + s.reuseAs(n) + if a, ok := a.(RawSymmetricer); ok { + amat := a.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(amat)) + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = f * amat.Data[i*amat.Stride+j] + } + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = f * a.At(i, j) + } + } +} + +// SubsetSym extracts a subset of the rows and columns of the matrix a and stores +// the result in-place into the receiver. The resulting matrix size is +// len(set)×len(set). Specifically, at the conclusion of SubsetSym, +// s.At(i, j) equals a.At(set[i], set[j]). Note that the supplied set does not +// have to be a strict subset, dimension repeats are allowed. +func (s *SymDense) SubsetSym(a Symmetric, set []int) { + n := len(set) + na := a.Symmetric() + s.reuseAs(n) + var restore func() + if a == s { + s, restore = s.isolatedWorkspace(a) + defer restore() + } + + if a, ok := a.(RawSymmetricer); ok { + raw := a.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(raw)) + } + for i := 0; i < n; i++ { + ssub := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + r := set[i] + rsub := raw.Data[r*raw.Stride : r*raw.Stride+na] + for j := i; j < n; j++ { + c := set[j] + if r <= c { + ssub[j] = rsub[c] + } else { + ssub[j] = raw.Data[c*raw.Stride+r] + } + } + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = a.At(set[i], set[j]) + } + } +} + +// SliceSym returns a new Matrix that shares backing data with the receiver. +// The returned matrix starts at {i,i} of the receiver and extends k-i rows +// and columns. The final row and column in the resulting matrix is k-1. +// SliceSym panics with ErrIndexOutOfRange if the slice is outside the +// capacity of the receiver. +func (s *SymDense) SliceSym(i, k int) Symmetric { + sz := s.cap + if i < 0 || sz < i || k < i || sz < k { + panic(ErrIndexOutOfRange) + } + v := *s + v.mat.Data = s.mat.Data[i*s.mat.Stride+i : (k-1)*s.mat.Stride+k] + v.mat.N = k - i + v.cap = s.cap - i + return &v +} + +// Trace returns the trace of the matrix. +func (s *SymDense) Trace() float64 { + // TODO(btracey): could use internal asm sum routine. + var v float64 + for i := 0; i < s.mat.N; i++ { + v += s.mat.Data[i*s.mat.Stride+i] + } + return v +} + +// GrowSym returns the receiver expanded by n rows and n columns. If the +// dimensions of the expanded matrix are outside the capacity of the receiver +// a new allocation is made, otherwise not. Note that the receiver itself is +// not modified during the call to GrowSquare. +func (s *SymDense) GrowSym(n int) Symmetric { + if n < 0 { + panic(ErrIndexOutOfRange) + } + if n == 0 { + return s + } + var v SymDense + n += s.mat.N + if n > s.cap { + v.mat = blas64.Symmetric{ + N: n, + Stride: n, + Uplo: blas.Upper, + Data: make([]float64, n*n), + } + v.cap = n + // Copy elements, including those not currently visible. Use a temporary + // structure to avoid modifying the receiver. + var tmp SymDense + tmp.mat = blas64.Symmetric{ + N: s.cap, + Stride: s.mat.Stride, + Data: s.mat.Data, + Uplo: s.mat.Uplo, + } + tmp.cap = s.cap + v.CopySym(&tmp) + return &v + } + v.mat = blas64.Symmetric{ + N: n, + Stride: s.mat.Stride, + Uplo: blas.Upper, + Data: s.mat.Data[:(n-1)*s.mat.Stride+n], + } + v.cap = s.cap + return &v +} + +// PowPSD computes a^pow where a is a positive symmetric definite matrix. 
+// +// PowPSD returns an error if the matrix is not not positive symmetric definite +// or the Eigendecomposition is not successful. +func (s *SymDense) PowPSD(a Symmetric, pow float64) error { + dim := a.Symmetric() + s.reuseAs(dim) + + var eigen EigenSym + ok := eigen.Factorize(a, true) + if !ok { + return ErrFailedEigen + } + values := eigen.Values(nil) + for i, v := range values { + if v <= 0 { + return ErrNotPSD + } + values[i] = math.Pow(v, pow) + } + u := eigen.VectorsTo(nil) + + s.SymOuterK(values[0], u.ColView(0)) + + var v VecDense + for i := 1; i < dim; i++ { + v.ColViewOf(u, i) + s.SymRankOne(s, values[i], &v) + } + return nil +} diff --git a/vendor/gonum.org/v1/gonum/mat/triangular.go b/vendor/gonum.org/v1/gonum/mat/triangular.go new file mode 100644 index 00000000..e32ee405 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/triangular.go @@ -0,0 +1,659 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +var ( + triDense *TriDense + _ Matrix = triDense + _ Triangular = triDense + _ RawTriangular = triDense + _ MutableTriangular = triDense + + _ NonZeroDoer = triDense + _ RowNonZeroDoer = triDense + _ ColNonZeroDoer = triDense +) + +const badTriCap = "mat: bad capacity for TriDense" + +// TriDense represents an upper or lower triangular matrix in dense storage +// format. +type TriDense struct { + mat blas64.Triangular + cap int +} + +// Triangular represents a triangular matrix. Triangular matrices are always square. +type Triangular interface { + Matrix + // Triangle returns the number of rows/columns in the matrix and its + // orientation. + Triangle() (n int, kind TriKind) + + // TTri is the equivalent of the T() method in the Matrix interface but + // guarantees the transpose is of triangular type. + TTri() Triangular +} + +// A RawTriangular can return a blas64.Triangular representation of the receiver. +// Changes to the blas64.Triangular.Data slice will be reflected in the original +// matrix, changes to the N, Stride, Uplo and Diag fields will not. +type RawTriangular interface { + RawTriangular() blas64.Triangular +} + +// A MutableTriangular can set elements of a triangular matrix. +type MutableTriangular interface { + Triangular + SetTri(i, j int, v float64) +} + +var ( + _ Matrix = TransposeTri{} + _ Triangular = TransposeTri{} + _ UntransposeTrier = TransposeTri{} +) + +// TransposeTri is a type for performing an implicit transpose of a Triangular +// matrix. It implements the Triangular interface, returning values from the +// transpose of the matrix within. +type TransposeTri struct { + Triangular Triangular +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Triangular field. +func (t TransposeTri) At(i, j int) float64 { + return t.Triangular.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. Triangular matrices are +// square and thus this is the same size as the original Triangular. +func (t TransposeTri) Dims() (r, c int) { + c, r = t.Triangular.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Triangular field. +func (t TransposeTri) T() Matrix { + return t.Triangular +} + +// Triangle returns the number of rows/columns in the matrix and its orientation. 
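PowPSD above diagonalizes the input and raises its eigenvalues to the given power, so a convenient check is a matrix square root; a minimal sketch with a diagonal positive definite input.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// A symmetric positive definite matrix.
	a := mat.NewSymDense(2, []float64{
		4, 0,
		0, 9,
	})

	// Matrix square root: root * root should reproduce a.
	var root mat.SymDense
	if err := root.PowPSD(a, 0.5); err != nil {
		panic(err)
	}
	fmt.Println(root.At(0, 0), root.At(1, 1)) // 2 3 (up to floating-point error)
}
```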
+func (t TransposeTri) Triangle() (int, TriKind) { + n, upper := t.Triangular.Triangle() + return n, !upper +} + +// TTri performs an implicit transpose by returning the Triangular field. +func (t TransposeTri) TTri() Triangular { + return t.Triangular +} + +// Untranspose returns the Triangular field. +func (t TransposeTri) Untranspose() Matrix { + return t.Triangular +} + +func (t TransposeTri) UntransposeTri() Triangular { + return t.Triangular +} + +// NewTriDense creates a new Triangular matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*n, data is +// used as the backing slice, and changes to the elements of the returned TriDense +// will be reflected in data. If neither of these is true, NewTriDense will panic. +// NewTriDense will panic if n is zero. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +// Only the values in the triangular portion corresponding to kind are used. +func NewTriDense(n int, kind TriKind, data []float64) *TriDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && len(data) != n*n { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*n) + } + uplo := blas.Lower + if kind == Upper { + uplo = blas.Upper + } + return &TriDense{ + mat: blas64.Triangular{ + N: n, + Stride: n, + Data: data, + Uplo: uplo, + Diag: blas.NonUnit, + }, + cap: n, + } +} + +func (t *TriDense) Dims() (r, c int) { + return t.mat.N, t.mat.N +} + +// Triangle returns the dimension of t and its orientation. The returned +// orientation is only valid when n is not zero. +func (t *TriDense) Triangle() (n int, kind TriKind) { + return t.mat.N, t.triKind() +} + +func (t *TriDense) isUpper() bool { + return isUpperUplo(t.mat.Uplo) +} + +func (t *TriDense) triKind() TriKind { + return TriKind(isUpperUplo(t.mat.Uplo)) +} + +func isUpperUplo(u blas.Uplo) bool { + switch u { + case blas.Upper: + return true + case blas.Lower: + return false + default: + panic(badTriangle) + } +} + +func uploToTriKind(u blas.Uplo) TriKind { + switch u { + case blas.Upper: + return Upper + case blas.Lower: + return Lower + default: + panic(badTriangle) + } +} + +// asSymBlas returns the receiver restructured as a blas64.Symmetric with the +// same backing memory. Panics if the receiver is unit. +// This returns a blas64.Symmetric and not a *SymDense because SymDense can only +// be upper triangular. +func (t *TriDense) asSymBlas() blas64.Symmetric { + if t.mat.Diag == blas.Unit { + panic("mat: cannot convert unit TriDense into blas64.Symmetric") + } + return blas64.Symmetric{ + N: t.mat.N, + Stride: t.mat.Stride, + Data: t.mat.Data, + Uplo: t.mat.Uplo, + } +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (t *TriDense) T() Matrix { + return Transpose{t} +} + +// TTri performs an implicit transpose by returning the receiver inside a TransposeTri. +func (t *TriDense) TTri() Triangular { + return TransposeTri{t} +} + +func (t *TriDense) RawTriangular() blas64.Triangular { + return t.mat +} + +// SetRawTriangular sets the underlying blas64.Triangular used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied Triangular must not use blas.Unit storage format. 
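A brief sketch of NewTriDense and Triangle above; mat.Upper and mat.Lower are the TriKind constants referenced in this file, and At is defined elsewhere in the package. Only the triangle matching the kind is read from the backing slice.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	data := []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 9,
	}

	// With mat.Upper only the upper triangle of data is used; the rest reads as zero.
	u := mat.NewTriDense(3, mat.Upper, data)
	fmt.Println(u.At(0, 2), u.At(2, 0)) // 3 0

	// With mat.Lower the roles are reversed.
	l := mat.NewTriDense(3, mat.Lower, data)
	fmt.Println(l.At(0, 2), l.At(2, 0)) // 0 7

	// Triangle reports the order and orientation.
	n, kind := u.Triangle()
	fmt.Println(n, kind == mat.Upper) // 3 true
}
```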
+func (t *TriDense) SetRawTriangular(mat blas64.Triangular) { + if mat.Diag == blas.Unit { + panic("mat: cannot set TriDense with Unit storage format") + } + t.mat = mat +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (t *TriDense) Reset() { + // N and Stride must be zeroed in unison. + t.mat.N, t.mat.Stride = 0, 0 + // Defensively zero Uplo to ensure + // it is set correctly later. + t.mat.Uplo = 0 + t.mat.Data = t.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (t *TriDense) Zero() { + if t.isUpper() { + for i := 0; i < t.mat.N; i++ { + zero(t.mat.Data[i*t.mat.Stride+i : i*t.mat.Stride+t.mat.N]) + } + return + } + for i := 0; i < t.mat.N; i++ { + zero(t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+i+1]) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. TriDense matrices can be zeroed using Reset. +func (t *TriDense) IsZero() bool { + // It must be the case that t.Dims() returns + // zeros in this case. See comment in Reset(). + return t.mat.Stride == 0 +} + +// untranspose untransposes a matrix if applicable. If a is an Untransposer, then +// untranspose returns the underlying matrix and true. If it is not, then it returns +// the input matrix and false. +func untransposeTri(a Triangular) (Triangular, bool) { + if ut, ok := a.(UntransposeTrier); ok { + return ut.UntransposeTri(), true + } + return a, false +} + +// reuseAs resizes a zero receiver to an n×n triangular matrix with the given +// orientation. If the receiver is non-zero, reuseAs checks that the receiver +// is the correct size and orientation. +func (t *TriDense) reuseAs(n int, kind TriKind) { + if n == 0 { + panic(ErrZeroLength) + } + ul := blas.Lower + if kind == Upper { + ul = blas.Upper + } + if t.mat.N > t.cap { + panic(badTriCap) + } + if t.IsZero() { + t.mat = blas64.Triangular{ + N: n, + Stride: n, + Diag: blas.NonUnit, + Data: use(t.mat.Data, n*n), + Uplo: ul, + } + t.cap = n + return + } + if t.mat.N != n { + panic(ErrShape) + } + if t.mat.Uplo != ul { + panic(ErrTriangle) + } +} + +// isolatedWorkspace returns a new TriDense matrix w with the size of a and +// returns a callback to defer which performs cleanup at the return of the call. +// This should be used when a method receiver is the same pointer as an input argument. +func (t *TriDense) isolatedWorkspace(a Triangular) (w *TriDense, restore func()) { + n, kind := a.Triangle() + if n == 0 { + panic(ErrZeroLength) + } + w = getWorkspaceTri(n, kind, false) + return w, func() { + t.Copy(w) + putWorkspaceTri(w) + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (t *TriDense) DiagView() Diagonal { + if t.mat.Diag == blas.Unit { + panic("mat: cannot take view of Unit diagonal") + } + n := t.mat.N + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: t.mat.Stride + 1, + Data: t.mat.Data[:(n-1)*t.mat.Stride+n], + }, + } +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. Only elements within the +// receiver's non-zero triangle are set. +// +// See the Copier interface for more information. 
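DiagView above returns a Diagonal view backed by the receiver's data, so later writes to the triangle are visible through it; a minimal sketch, assuming the Diagonal interface embeds Matrix so its elements can be read with At.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	t := mat.NewTriDense(3, mat.Upper, []float64{
		1, 2, 3,
		0, 4, 5,
		0, 0, 6,
	})

	// The view shares t's backing data.
	d := t.DiagView()
	fmt.Println(d.At(1, 1)) // 4

	// A later write to the triangle is observed through the view.
	t.SetTri(1, 1, 40)
	fmt.Println(d.At(1, 1)) // 40
}
```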
+func (t *TriDense) Copy(a Matrix) (r, c int) { + r, c = a.Dims() + r = min(r, t.mat.N) + c = min(c, t.mat.N) + if r == 0 || c == 0 { + return 0, 0 + } + + switch a := a.(type) { + case RawMatrixer: + amat := a.RawMatrix() + if t.isUpper() { + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) + } + } else { + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) + } + } + case RawTriangular: + amat := a.RawTriangular() + aIsUpper := isUpperUplo(amat.Uplo) + tIsUpper := t.isUpper() + switch { + case tIsUpper && aIsUpper: + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) + } + case !tIsUpper && !aIsUpper: + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) + } + default: + for i := 0; i < r; i++ { + t.set(i, i, amat.Data[i*amat.Stride+i]) + } + } + default: + isUpper := t.isUpper() + for i := 0; i < r; i++ { + if isUpper { + for j := i; j < c; j++ { + t.set(i, j, a.At(i, j)) + } + } else { + for j := 0; j <= i; j++ { + t.set(i, j, a.At(i, j)) + } + } + } + } + + return r, c +} + +// InverseTri computes the inverse of the triangular matrix a, storing the result +// into the receiver. If a is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally be +// avoided where possible, for example by using the Solve routines. +func (t *TriDense) InverseTri(a Triangular) error { + t.checkOverlapMatrix(a) + n, _ := a.Triangle() + t.reuseAs(a.Triangle()) + t.Copy(a) + work := getFloats(3*n, false) + iwork := getInts(n, false) + cond := lapack64.Trcon(CondNorm, t.mat, work, iwork) + putFloats(work) + putInts(iwork) + if math.IsInf(cond, 1) { + return Condition(cond) + } + ok := lapack64.Trtri(t.mat) + if !ok { + return Condition(math.Inf(1)) + } + if cond > ConditionTolerance { + return Condition(cond) + } + return nil +} + +// MulTri takes the product of triangular matrices a and b and places the result +// in the receiver. The size of a and b must match, and they both must have the +// same TriKind, or Mul will panic. +func (t *TriDense) MulTri(a, b Triangular) { + n, kind := a.Triangle() + nb, kindb := b.Triangle() + if n != nb { + panic(ErrShape) + } + if kind != kindb { + panic(ErrTriangle) + } + + aU, _ := untransposeTri(a) + bU, _ := untransposeTri(b) + t.checkOverlapMatrix(bU) + t.checkOverlapMatrix(aU) + t.reuseAs(n, kind) + var restore func() + if t == aU { + t, restore = t.isolatedWorkspace(aU) + defer restore() + } else if t == bU { + t, restore = t.isolatedWorkspace(bU) + defer restore() + } + + // TODO(btracey): Improve the set of fast-paths. + if kind == Upper { + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + var v float64 + for k := i; k <= j; k++ { + v += a.At(i, k) * b.At(k, j) + } + t.SetTri(i, j, v) + } + } + return + } + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + var v float64 + for k := j; k <= i; k++ { + v += a.At(i, k) * b.At(k, j) + } + t.SetTri(i, j, v) + } + } +} + +// ScaleTri multiplies the elements of a by f, placing the result in the receiver. +// If the receiver is non-zero, the size and kind of the receiver must match +// the input, or ScaleTri will panic. 
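InverseTri and MulTri above can be exercised on a small, well-conditioned triangle; a minimal sketch, where multiplying a matrix by its inverse should recover the identity up to floating-point error.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	a := mat.NewTriDense(2, mat.Upper, []float64{
		2, 1,
		0, 4,
	})

	// Invert a; InverseTri reports a Condition error for ill-conditioned input.
	var inv mat.TriDense
	if err := inv.InverseTri(a); err != nil {
		fmt.Println("inverse may be inaccurate:", err)
	}

	// a * a⁻¹ should be the identity.
	var prod mat.TriDense
	prod.MulTri(a, &inv)
	fmt.Println(prod.At(0, 0), prod.At(0, 1), prod.At(1, 1)) // 1 0 1 (up to floating-point error)
}
```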
+func (t *TriDense) ScaleTri(f float64, a Triangular) { + n, kind := a.Triangle() + t.reuseAs(n, kind) + + // TODO(btracey): Improve the set of fast-paths. + switch a := a.(type) { + case RawTriangular: + amat := a.RawTriangular() + if t != a { + t.checkOverlap(generalFromTriangular(amat)) + } + if kind == Upper { + for i := 0; i < n; i++ { + ts := t.mat.Data[i*t.mat.Stride+i : i*t.mat.Stride+n] + as := amat.Data[i*amat.Stride+i : i*amat.Stride+n] + for i, v := range as { + ts[i] = v * f + } + } + return + } + for i := 0; i < n; i++ { + ts := t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+i+1] + as := amat.Data[i*amat.Stride : i*amat.Stride+i+1] + for i, v := range as { + ts[i] = v * f + } + } + return + default: + t.checkOverlapMatrix(a) + isUpper := kind == Upper + for i := 0; i < n; i++ { + if isUpper { + for j := i; j < n; j++ { + t.set(i, j, f*a.At(i, j)) + } + } else { + for j := 0; j <= i; j++ { + t.set(i, j, f*a.At(i, j)) + } + } + } + } +} + +// Trace returns the trace of the matrix. +func (t *TriDense) Trace() float64 { + // TODO(btracey): could use internal asm sum routine. + var v float64 + for i := 0; i < t.mat.N; i++ { + v += t.mat.Data[i*t.mat.Stride+i] + } + return v +} + +// copySymIntoTriangle copies a symmetric matrix into a TriDense +func copySymIntoTriangle(t *TriDense, s Symmetric) { + n, upper := t.Triangle() + ns := s.Symmetric() + if n != ns { + panic("mat: triangle size mismatch") + } + ts := t.mat.Stride + if rs, ok := s.(RawSymmetricer); ok { + sd := rs.RawSymmetric() + ss := sd.Stride + if upper { + if sd.Uplo == blas.Upper { + for i := 0; i < n; i++ { + copy(t.mat.Data[i*ts+i:i*ts+n], sd.Data[i*ss+i:i*ss+n]) + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + t.mat.Data[i*ts+j] = sd.Data[j*ss+i] + } + } + return + } + if sd.Uplo == blas.Upper { + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + t.mat.Data[i*ts+j] = sd.Data[j*ss+i] + } + } + return + } + for i := 0; i < n; i++ { + copy(t.mat.Data[i*ts:i*ts+i+1], sd.Data[i*ss:i*ss+i+1]) + } + return + } + if upper { + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + t.mat.Data[i*ts+j] = s.At(i, j) + } + } + return + } + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + t.mat.Data[i*ts+j] = s.At(i, j) + } + } +} + +// DoNonZero calls the function fn for each of the non-zero elements of t. The function fn +// takes a row/column index and the element value of t at (i, j). +func (t *TriDense) DoNonZero(fn func(i, j int, v float64)) { + if t.isUpper() { + for i := 0; i < t.mat.N; i++ { + for j := i; j < t.mat.N; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } + return + } + for i := 0; i < t.mat.N; i++ { + for j := 0; j <= i; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of t. The function fn +// takes a row/column index and the element value of t at (i, j). +func (t *TriDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || t.mat.N <= i { + panic(ErrRowAccess) + } + if t.isUpper() { + for j := i; j < t.mat.N; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + return + } + for j := 0; j <= i; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of t. The function fn +// takes a row/column index and the element value of t at (i, j). 
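The DoNonZero family above only visits the stored triangle and skips zero entries, as the following sketch shows.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	t := mat.NewTriDense(3, mat.Lower, []float64{
		1, 0, 0,
		0, 2, 0,
		3, 0, 4,
	})

	// Only non-zero elements of the lower triangle are visited; the stored
	// zeros at {1,0} and {2,1} and the entire upper triangle are skipped.
	t.DoNonZero(func(i, j int, v float64) {
		fmt.Printf("(%d,%d) = %v\n", i, j, v)
	})
	// Visits, row by row: (0,0)=1 (1,1)=2 (2,0)=3 (2,2)=4
}
```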
+func (t *TriDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || t.mat.N <= j { + panic(ErrColAccess) + } + if t.isUpper() { + for i := 0; i <= j; i++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + return + } + for i := j; i < t.mat.N; i++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/triband.go b/vendor/gonum.org/v1/gonum/mat/triband.go new file mode 100644 index 00000000..f9785504 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/triband.go @@ -0,0 +1,353 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + triBand TriBanded + _ Banded = triBand + _ Triangular = triBand + + triBandDense *TriBandDense + _ Matrix = triBandDense + _ Triangular = triBandDense + _ Banded = triBandDense + _ TriBanded = triBandDense + _ RawTriBander = triBandDense + _ MutableTriBanded = triBandDense +) + +// TriBanded is a triangular band matrix interface type. +type TriBanded interface { + Banded + + // Triangle returns the number of rows/columns in the matrix and its + // orientation. + Triangle() (n int, kind TriKind) + + // TTri is the equivalent of the T() method in the Matrix interface but + // guarantees the transpose is of triangular type. + TTri() Triangular + + // TriBand returns the number of rows/columns in the matrix, the + // size of the bandwidth, and the orientation. + TriBand() (n, k int, kind TriKind) + + // TTriBand is the equivalent of the T() method in the Matrix interface but + // guarantees the transpose is of banded triangular type. + TTriBand() TriBanded +} + +// A RawTriBander can return a blas64.TriangularBand representation of the receiver. +// Changes to the blas64.TriangularBand.Data slice will be reflected in the original +// matrix, changes to the N, K, Stride, Uplo and Diag fields will not. +type RawTriBander interface { + RawTriBand() blas64.TriangularBand +} + +// MutableTriBanded is a triangular band matrix interface type that allows +// elements to be altered. +type MutableTriBanded interface { + TriBanded + SetTriBand(i, j int, v float64) +} + +var ( + tTriBand TransposeTriBand + _ Matrix = tTriBand + _ TriBanded = tTriBand + _ Untransposer = tTriBand + _ UntransposeTrier = tTriBand + _ UntransposeBander = tTriBand + _ UntransposeTriBander = tTriBand +) + +// TransposeTriBand is a type for performing an implicit transpose of a TriBanded +// matrix. It implements the TriBanded interface, returning values from the +// transpose of the matrix within. +type TransposeTriBand struct { + TriBanded TriBanded +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the TriBanded field. +func (t TransposeTriBand) At(i, j int) float64 { + return t.TriBanded.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. TriBanded matrices are +// square and thus this is the same size as the original TriBanded. +func (t TransposeTriBand) Dims() (r, c int) { + c, r = t.TriBanded.Dims() + return r, c +} + +// T performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) T() Matrix { + return t.TriBanded +} + +// Triangle returns the number of rows/columns in the matrix and its orientation. 
+func (t TransposeTriBand) Triangle() (int, TriKind) { + n, upper := t.TriBanded.Triangle() + return n, !upper +} + +// TTri performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) TTri() Triangular { + return t.TriBanded +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +func (t TransposeTriBand) Bandwidth() (kl, ku int) { + kl, ku = t.TriBanded.Bandwidth() + return ku, kl +} + +// TBand performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) TBand() Banded { + return t.TriBanded +} + +// TriBand returns the number of rows/columns in the matrix, the +// size of the bandwidth, and the orientation. +func (t TransposeTriBand) TriBand() (n, k int, kind TriKind) { + n, k, kind = t.TriBanded.TriBand() + return n, k, !kind +} + +// TTriBand performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) TTriBand() TriBanded { + return t.TriBanded +} + +// Untranspose returns the Triangular field. +func (t TransposeTriBand) Untranspose() Matrix { + return t.TriBanded +} + +// UntransposeTri returns the underlying Triangular matrix. +func (t TransposeTriBand) UntransposeTri() Triangular { + return t.TriBanded +} + +// UntransposeBand returns the underlying Banded matrix. +func (t TransposeTriBand) UntransposeBand() Banded { + return t.TriBanded +} + +// UntransposeTriBand returns the underlying TriBanded matrix. +func (t TransposeTriBand) UntransposeTriBand() TriBanded { + return t.TriBanded +} + +// TriBandDense represents a triangular band matrix in dense storage format. +type TriBandDense struct { + mat blas64.TriangularBand +} + +// NewTriBandDense creates a new triangular banded matrix with n rows and columns, +// k bands in the direction of the specified kind. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*(k+1), +// data is used as the backing slice, and changes to the elements of the returned +// TriBandDense will be reflected in data. If neither of these is true, NewTriBandDense +// will panic. k must be at least zero and less than n, otherwise NewTriBandDense will panic. +// +// The data must be arranged in row-major order constructed by removing the zeros +// from the rows outside the band and aligning the diagonals. For example, if +// the upper-triangular banded matrix +// 1 2 3 0 0 0 +// 0 4 5 6 0 0 +// 0 0 7 8 9 0 +// 0 0 0 10 11 12 +// 0 0 0 0 13 14 +// 0 0 0 0 0 15 +// becomes (* entries are never accessed) +// 1 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 * +// 15 * * +// which is passed to NewTriBandDense as []float64{1, 2, ..., 15, *, *, *} +// with k=2 and kind = mat.Upper. +// The lower triangular banded matrix +// 1 0 0 0 0 0 +// 2 3 0 0 0 0 +// 4 5 6 0 0 0 +// 0 7 8 9 0 0 +// 0 0 10 11 12 0 +// 0 0 0 13 14 15 +// becomes (* entries are never accessed) +// * * 1 +// * 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 15 +// which is passed to NewTriBandDense as []float64{*, *, *, 1, 2, ..., 15} +// with k=2 and kind = mat.Lower. +// Only the values in the band portion of the matrix are used. 
+func NewTriBandDense(n, k int, kind TriKind, data []float64) *TriBandDense { + if n <= 0 || k < 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if k+1 > n { + panic("mat: band out of range") + } + bc := k + 1 + if data != nil && len(data) != n*bc { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*bc) + } + uplo := blas.Lower + if kind { + uplo = blas.Upper + } + return &TriBandDense{ + mat: blas64.TriangularBand{ + Uplo: uplo, + Diag: blas.NonUnit, + N: n, + K: k, + Data: data, + Stride: bc, + }, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (t *TriBandDense) Dims() (r, c int) { + return t.mat.N, t.mat.N +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (t *TriBandDense) T() Matrix { + return Transpose{t} +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. TriBandDense matrices can be zeroed using Reset. +func (t *TriBandDense) IsZero() bool { + // It must be the case that t.Dims() returns + // zeros in this case. See comment in Reset(). + return t.mat.Stride == 0 +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (t *TriBandDense) Reset() { + t.mat.N = 0 + t.mat.Stride = 0 + t.mat.K = 0 + t.mat.Data = t.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (t *TriBandDense) Zero() { + if t.isUpper() { + for i := 0; i < t.mat.N; i++ { + u := min(1+t.mat.K, t.mat.N-i) + zero(t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+u]) + } + return + } + for i := 0; i < t.mat.N; i++ { + l := max(0, t.mat.K-i) + zero(t.mat.Data[i*t.mat.Stride+l : i*t.mat.Stride+t.mat.K+1]) + } +} + +func (t *TriBandDense) isUpper() bool { + return isUpperUplo(t.mat.Uplo) +} + +func (t *TriBandDense) triKind() TriKind { + return TriKind(isUpperUplo(t.mat.Uplo)) +} + +// Triangle returns the dimension of t and its orientation. The returned +// orientation is only valid when n is not zero. +func (t *TriBandDense) Triangle() (n int, kind TriKind) { + return t.mat.N, t.triKind() +} + +// TTri performs an implicit transpose by returning the receiver inside a TransposeTri. +func (t *TriBandDense) TTri() Triangular { + return TransposeTri{t} +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +func (t *TriBandDense) Bandwidth() (kl, ku int) { + if t.isUpper() { + return 0, t.mat.K + } + return t.mat.K, 0 +} + +// TBand performs an implicit transpose by returning the receiver inside a TransposeBand. +func (t *TriBandDense) TBand() Banded { + return TransposeBand{t} +} + +// TriBand returns the number of rows/columns in the matrix, the +// size of the bandwidth, and the orientation. +func (t *TriBandDense) TriBand() (n, k int, kind TriKind) { + return t.mat.N, t.mat.K, TriKind(!t.IsZero()) && t.triKind() +} + +// TTriBand performs an implicit transpose by returning the receiver inside a TransposeTriBand. +func (t *TriBandDense) TTriBand() TriBanded { + return TransposeTriBand{t} +} + +// RawTriBand returns the underlying blas64.TriangularBand used by the receiver. +// Changes to the blas64.TriangularBand.Data slice will be reflected in the original +// matrix, changes to the N, K, Stride, Uplo and Diag fields will not. 
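Mirroring the upper-triangular storage example in the NewTriBandDense comment above, this sketch packs the 6×6, k=2 matrix and reads a few entries back through At (defined elsewhere in the package); the unused `*` slots are set to zero.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// Packed rows of the documented 6×6 upper triangular band matrix with k=2.
	data := []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 9,
		10, 11, 12,
		13, 14, 0,
		15, 0, 0,
	}
	tb := mat.NewTriBandDense(6, 2, mat.Upper, data)

	fmt.Println(tb.At(0, 0), tb.At(0, 2)) // 1 3
	fmt.Println(tb.At(3, 5))              // 12
	// Entries below the diagonal or beyond the band read as zero.
	fmt.Println(tb.At(1, 0), tb.At(0, 3)) // 0 0
}
```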
+func (t *TriBandDense) RawTriBand() blas64.TriangularBand { + return t.mat +} + +// SetRawTriBand sets the underlying blas64.TriangularBand used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied TriangularBand must not use blas.Unit storage format. +func (t *TriBandDense) SetRawTriBand(mat blas64.TriangularBand) { + if mat.Diag == blas.Unit { + panic("mat: cannot set TriBand with Unit storage") + } + t.mat = mat +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (t *TriBandDense) DiagView() Diagonal { + if t.mat.Diag == blas.Unit { + panic("mat: cannot take view of Unit diagonal") + } + n := t.mat.N + data := t.mat.Data + if !t.isUpper() { + data = data[t.mat.K:] + } + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: t.mat.Stride, + Data: data[:(n-1)*t.mat.Stride+1], + }, + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/vector.go b/vendor/gonum.org/v1/gonum/mat/vector.go new file mode 100644 index 00000000..8191312b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/vector.go @@ -0,0 +1,741 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var ( + vector *VecDense + + _ Matrix = vector + _ Vector = vector + _ Reseter = vector +) + +// Vector is a vector. +type Vector interface { + Matrix + AtVec(int) float64 + Len() int +} + +// TransposeVec is a type for performing an implicit transpose of a Vector. +// It implements the Vector interface, returning values from the transpose +// of the vector within. +type TransposeVec struct { + Vector Vector +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Vector field. +func (t TransposeVec) At(i, j int) float64 { + return t.Vector.At(j, i) +} + +// AtVec returns the element at position i. It panics if i is out of bounds. +func (t TransposeVec) AtVec(i int) float64 { + return t.Vector.AtVec(i) +} + +// Dims returns the dimensions of the transposed vector. +func (t TransposeVec) Dims() (r, c int) { + c, r = t.Vector.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Vector field. +func (t TransposeVec) T() Matrix { + return t.Vector +} + +// Len returns the number of columns in the vector. +func (t TransposeVec) Len() int { + return t.Vector.Len() +} + +// TVec performs an implicit transpose by returning the Vector field. +func (t TransposeVec) TVec() Vector { + return t.Vector +} + +// Untranspose returns the Vector field. +func (t TransposeVec) Untranspose() Matrix { + return t.Vector +} + +func (t TransposeVec) UntransposeVec() Vector { + return t.Vector +} + +// VecDense represents a column vector. +type VecDense struct { + mat blas64.Vector + // A BLAS vector can have a negative increment, but allowing this + // in the mat type complicates a lot of code, and doesn't gain anything. + // VecDense must have positive increment in this package. +} + +// NewVecDense creates a new VecDense of length n. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n, data is +// used as the backing slice, and changes to the elements of the returned VecDense +// will be reflected in data. If neither of these is true, NewVecDense will panic. 
+// NewVecDense will panic if n is zero. +func NewVecDense(n int, data []float64) *VecDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if len(data) != n && data != nil { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n) + } + return &VecDense{ + mat: blas64.Vector{ + N: n, + Inc: 1, + Data: data, + }, + } +} + +// SliceVec returns a new Vector that shares backing data with the receiver. +// The returned matrix starts at i of the receiver and extends k-i elements. +// SliceVec panics with ErrIndexOutOfRange if the slice is outside the capacity +// of the receiver. +func (v *VecDense) SliceVec(i, k int) Vector { + if i < 0 || k <= i || v.Cap() < k { + panic(ErrIndexOutOfRange) + } + return &VecDense{ + mat: blas64.Vector{ + N: k - i, + Inc: v.mat.Inc, + Data: v.mat.Data[i*v.mat.Inc : (k-1)*v.mat.Inc+1], + }, + } +} + +// Dims returns the number of rows and columns in the matrix. Columns is always 1 +// for a non-Reset vector. +func (v *VecDense) Dims() (r, c int) { + if v.IsZero() { + return 0, 0 + } + return v.mat.N, 1 +} + +// Caps returns the number of rows and columns in the backing matrix. Columns is always 1 +// for a non-Reset vector. +func (v *VecDense) Caps() (r, c int) { + if v.IsZero() { + return 0, 0 + } + return v.Cap(), 1 +} + +// Len returns the length of the vector. +func (v *VecDense) Len() int { + return v.mat.N +} + +// Cap returns the capacity of the vector. +func (v *VecDense) Cap() int { + if v.IsZero() { + return 0 + } + return (cap(v.mat.Data)-1)/v.mat.Inc + 1 +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (v *VecDense) T() Matrix { + return Transpose{v} +} + +// TVec performs an implicit transpose by returning the receiver inside a TransposeVec. +func (v *VecDense) TVec() Vector { + return TransposeVec{v} +} + +// Reset zeros the length of the vector so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (v *VecDense) Reset() { + // No change of Inc or N to 0 may be + // made unless both are set to 0. + v.mat.Inc = 0 + v.mat.N = 0 + v.mat.Data = v.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (v *VecDense) Zero() { + for i := 0; i < v.mat.N; i++ { + v.mat.Data[v.mat.Inc*i] = 0 + } +} + +// CloneVec makes a copy of a into the receiver, overwriting the previous value +// of the receiver. +func (v *VecDense) CloneVec(a Vector) { + if v == a { + return + } + n := a.Len() + v.mat = blas64.Vector{ + N: n, + Inc: 1, + Data: use(v.mat.Data, n), + } + if r, ok := a.(RawVectorer); ok { + blas64.Copy(r.RawVector(), v.mat) + return + } + for i := 0; i < a.Len(); i++ { + v.SetVec(i, a.AtVec(i)) + } +} + +// VecDenseCopyOf returns a newly allocated copy of the elements of a. +func VecDenseCopyOf(a Vector) *VecDense { + v := &VecDense{} + v.CloneVec(a) + return v +} + +func (v *VecDense) RawVector() blas64.Vector { + return v.mat +} + +// CopyVec makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two vectors and +// returns the number of elements it copied. 
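A short sketch of NewVecDense and SliceVec above; the slice is a view that shares backing data with the receiver, so writes through it (here via SetVec, after asserting the concrete *VecDense type that SliceVec returns) are visible in the original vector.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	v := mat.NewVecDense(5, []float64{10, 11, 12, 13, 14})

	// A view of elements [1, 4): length 3, sharing v's backing data.
	w := v.SliceVec(1, 4).(*mat.VecDense)
	fmt.Println(w.Len(), w.AtVec(0)) // 3 11

	// Writing through the view is visible in the original vector.
	w.SetVec(2, 99)
	fmt.Println(v.AtVec(3)) // 99
}
```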
+func (v *VecDense) CopyVec(a Vector) int { + n := min(v.Len(), a.Len()) + if v == a { + return n + } + if r, ok := a.(RawVectorer); ok { + blas64.Copy(r.RawVector(), v.mat) + return n + } + for i := 0; i < n; i++ { + v.setVec(i, a.AtVec(i)) + } + return n +} + +// ScaleVec scales the vector a by alpha, placing the result in the receiver. +func (v *VecDense) ScaleVec(alpha float64, a Vector) { + n := a.Len() + + if v == a { + if v.mat.Inc == 1 { + f64.ScalUnitary(alpha, v.mat.Data) + return + } + f64.ScalInc(alpha, v.mat.Data, uintptr(n), uintptr(v.mat.Inc)) + return + } + + v.reuseAs(n) + + if rv, ok := a.(RawVectorer); ok { + mat := rv.RawVector() + v.checkOverlap(mat) + if v.mat.Inc == 1 && mat.Inc == 1 { + f64.ScalUnitaryTo(v.mat.Data, alpha, mat.Data) + return + } + f64.ScalIncTo(v.mat.Data, uintptr(v.mat.Inc), + alpha, mat.Data, uintptr(n), uintptr(mat.Inc)) + return + } + + for i := 0; i < n; i++ { + v.setVec(i, alpha*a.AtVec(i)) + } +} + +// AddScaledVec adds the vectors a and alpha*b, placing the result in the receiver. +func (v *VecDense) AddScaledVec(a Vector, alpha float64, b Vector) { + if alpha == 1 { + v.AddVec(a, b) + return + } + if alpha == -1 { + v.SubVec(a, b) + return + } + + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + var amat, bmat blas64.Vector + fast := true + aU, _ := untranspose(a) + if rv, ok := aU.(RawVectorer); ok { + amat = rv.RawVector() + if v != a { + v.checkOverlap(amat) + } + } else { + fast = false + } + bU, _ := untranspose(b) + if rv, ok := bU.(RawVectorer); ok { + bmat = rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + } else { + fast = false + } + + v.reuseAs(ar) + + switch { + case alpha == 0: // v <- a + if v == a { + return + } + v.CopyVec(a) + case v == a && v == b: // v <- v + alpha * v = (alpha + 1) * v + blas64.Scal(alpha+1, v.mat) + case !fast: // v <- a + alpha * b without blas64 support. + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)+alpha*b.AtVec(i)) + } + case v == a && v != b: // v <- v + alpha * b + if v.mat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) + } else { + f64.AxpyInc(alpha, bmat.Data, v.mat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(v.mat.Inc), 0, 0) + } + default: // v <- a + alpha * b or v <- a + alpha * v + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) + } else { + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + alpha, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + } + } +} + +// AddVec adds the vectors a and b, placing the result in the receiver. +func (v *VecDense) AddVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. 
+ f64.AxpyUnitaryTo(v.mat.Data, 1, bmat.Data, amat.Data) + return + } + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + 1, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)+b.AtVec(i)) + } +} + +// SubVec subtracts the vector b from a, placing the result in the receiver. +func (v *VecDense) SubVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, -1, bmat.Data, amat.Data) + return + } + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + -1, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)-b.AtVec(i)) + } +} + +// MulElemVec performs element-wise multiplication of a and b, placing the result +// in the receiver. +func (v *VecDense) MulElemVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + for i, a := range amat.Data { + v.mat.Data[i] = a * bmat.Data[i] + } + return + } + var ia, ib int + for i := 0; i < ar; i++ { + v.setVec(i, amat.Data[ia]*bmat.Data[ib]) + ia += amat.Inc + ib += bmat.Inc + } + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)*b.AtVec(i)) + } +} + +// DivElemVec performs element-wise division of a by b, placing the result +// in the receiver. +func (v *VecDense) DivElemVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + for i, a := range amat.Data { + v.setVec(i, a/bmat.Data[i]) + } + return + } + var ia, ib int + for i := 0; i < ar; i++ { + v.setVec(i, amat.Data[ia]/bmat.Data[ib]) + ia += amat.Inc + ib += bmat.Inc + } + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)/b.AtVec(i)) + } +} + +// MulVec computes a * b. The result is stored into the receiver. +// MulVec panics if the number of columns in a does not equal the number of rows in b +// or if the number of columns in b does not equal 1. 
+func (v *VecDense) MulVec(a Matrix, b Vector) { + r, c := a.Dims() + br, bc := b.Dims() + if c != br || bc != 1 { + panic(ErrShape) + } + + aU, trans := untranspose(a) + var bmat blas64.Vector + fast := true + bU, _ := untranspose(b) + if rv, ok := bU.(RawVectorer); ok { + bmat = rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + } else { + fast = false + } + + v.reuseAs(r) + var restore func() + if v == aU { + v, restore = v.isolatedWorkspace(aU.(*VecDense)) + defer restore() + } else if v == b { + v, restore = v.isolatedWorkspace(b) + defer restore() + } + + // TODO(kortschak): Improve the non-fast paths. + switch aU := aU.(type) { + case Vector: + if b.Len() == 1 { + // {n,1} x {1,1} + v.ScaleVec(b.AtVec(0), aU) + return + } + + // {1,n} x {n,1} + if fast { + if rv, ok := aU.(RawVectorer); ok { + amat := rv.RawVector() + if v != aU { + v.checkOverlap(amat) + } + + if amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + v.setVec(0, f64.DotUnitary(amat.Data, bmat.Data)) + return + } + v.setVec(0, f64.DotInc(amat.Data, bmat.Data, + uintptr(c), uintptr(amat.Inc), uintptr(bmat.Inc), 0, 0)) + return + } + } + var sum float64 + for i := 0; i < c; i++ { + sum += aU.AtVec(i) * b.AtVec(i) + } + v.setVec(0, sum) + return + case RawSymmetricer: + if fast { + amat := aU.RawSymmetric() + // We don't know that a is a *SymDense, so make + // a temporary SymDense to check overlap. + (&SymDense{mat: amat}).checkOverlap(v.asGeneral()) + blas64.Symv(1, amat, bmat, 0, v.mat) + return + } + case RawTriangular: + v.CopyVec(b) + amat := aU.RawTriangular() + // We don't know that a is a *TriDense, so make + // a temporary TriDense to check overlap. + (&TriDense{mat: amat}).checkOverlap(v.asGeneral()) + ta := blas.NoTrans + if trans { + ta = blas.Trans + } + blas64.Trmv(ta, amat, v.mat) + case RawMatrixer: + if fast { + amat := aU.RawMatrix() + // We don't know that a is a *Dense, so make + // a temporary Dense to check overlap. + (&Dense{mat: amat}).checkOverlap(v.asGeneral()) + t := blas.NoTrans + if trans { + t = blas.Trans + } + blas64.Gemv(t, 1, amat, bmat, 0, v.mat) + return + } + default: + if fast { + for i := 0; i < r; i++ { + var f float64 + for j := 0; j < c; j++ { + f += a.At(i, j) * bmat.Data[j*bmat.Inc] + } + v.setVec(i, f) + } + return + } + } + + for i := 0; i < r; i++ { + var f float64 + for j := 0; j < c; j++ { + f += a.At(i, j) * b.AtVec(j) + } + v.setVec(i, f) + } +} + +// reuseAs resizes an empty vector to a r×1 vector, +// or checks that a non-empty matrix is r×1. +func (v *VecDense) reuseAs(r int) { + if r == 0 { + panic(ErrZeroLength) + } + if v.IsZero() { + v.mat = blas64.Vector{ + N: r, + Inc: 1, + Data: use(v.mat.Data, r), + } + return + } + if r != v.mat.N { + panic(ErrShape) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized vectors can be the +// receiver for size-restricted operations. VecDenses can be zeroed using Reset. +func (v *VecDense) IsZero() bool { + // It must be the case that v.Dims() returns + // zeros in this case. See comment in Reset(). + return v.mat.Inc == 0 +} + +func (v *VecDense) isolatedWorkspace(a Vector) (n *VecDense, restore func()) { + l := a.Len() + if l == 0 { + panic(ErrZeroLength) + } + n = getWorkspaceVec(l, false) + return n, func() { + v.CopyVec(n) + putWorkspaceVec(n) + } +} + +// asDense returns a Dense representation of the receiver with the same +// underlying data. 
+func (v *VecDense) asDense() *Dense { + return &Dense{ + mat: v.asGeneral(), + capRows: v.mat.N, + capCols: 1, + } +} + +// asGeneral returns a blas64.General representation of the receiver with the +// same underlying data. +func (v *VecDense) asGeneral() blas64.General { + return blas64.General{ + Rows: v.mat.N, + Cols: 1, + Stride: v.mat.Inc, + Data: v.mat.Data, + } +} + +// ColViewOf reflects the column j of the RawMatrixer m, into the receiver +// backed by the same underlying data. The length of the receiver must either be +// zero or match the number of rows in m. +func (v *VecDense) ColViewOf(m RawMatrixer, j int) { + rm := m.RawMatrix() + + if j >= rm.Cols || j < 0 { + panic(ErrColAccess) + } + if !v.IsZero() && v.mat.N != rm.Rows { + panic(ErrShape) + } + + v.mat.Inc = rm.Stride + v.mat.Data = rm.Data[j : (rm.Rows-1)*rm.Stride+j+1] + v.mat.N = rm.Rows +} + +// RowViewOf reflects the row i of the RawMatrixer m, into the receiver +// backed by the same underlying data. The length of the receiver must either be +// zero or match the number of columns in m. +func (v *VecDense) RowViewOf(m RawMatrixer, i int) { + rm := m.RawMatrix() + + if i >= rm.Rows || i < 0 { + panic(ErrRowAccess) + } + if !v.IsZero() && v.mat.N != rm.Cols { + panic(ErrShape) + } + + v.mat.Inc = 1 + v.mat.Data = rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] + v.mat.N = rm.Cols +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 9f556934..055480b9 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -1,12 +1,16 @@ language: go go: - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index e4e56e28..129bc2a9 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -229,6 +229,10 @@ type decoder struct { mapType reflect.Type terrors []string strict bool + + decodeCount int + aliasCount int + aliasDepth int } var ( @@ -314,7 +318,43 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm return out, false, false } +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
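+		// For example, at decodeCount = 2,200,000 (the midpoint of the range) this
+		// returns 0.99 - 0.89*0.5 = 0.545, i.e. just over half of all decode
+		// operations may still come from alias expansion.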
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } switch n.kind { case documentNode: return d.document(n, out) @@ -353,7 +393,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) { failf("anchor '%s' value contains itself", n.value) } d.aliases[n] = true + d.aliasDepth++ good = d.unmarshal(n.alias, out) + d.aliasDepth-- delete(d.aliases, n) return good } @@ -746,8 +788,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { case mappingNode: d.unmarshal(n, out) case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { + if n.alias != nil && n.alias.kind != mappingNode { failWantMap() } d.unmarshal(n, out) @@ -756,8 +797,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { for i := len(n.children) - 1; i >= 0; i-- { ni := n.children[i] if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { + if ni.alias != nil && ni.alias.kind != mappingNode { failWantMap() } } else if ni.kind != mappingNode { diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 6c151db6..4120e0c9 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -81,7 +81,7 @@ func resolvableTag(tag string) bool { return false } -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 077fd1dd..0b9bb603 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -626,30 +626,17 @@ func trace(args ...interface{}) func() { func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { // While we need more tokens to fetch, do it. for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { return false + } else if !valid { + break } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break } // Fetch the next token. if !yaml_parser_fetch_next_token(parser) { @@ -678,11 +665,6 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { return false } - // Remove obsolete potential simple keys. 
- if !yaml_parser_stale_simple_keys(parser) { - return false - } - // Check the indentation level against the current column. if !yaml_parser_unroll_indent(parser, parser.mark.column) { return false @@ -837,29 +819,30 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { "found character that cannot start any token") } -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") } + simple_key.possible = false + return false, true } - return true + return true, true } // Check if a simple key may start at the current position and add it if @@ -879,13 +862,14 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { possible: true, required: required, token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, } - simple_key.mark = parser.mark if !yaml_parser_remove_simple_key(parser) { return false } parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 } return true } @@ -900,19 +884,33 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { "while scanning a simple key", parser.simple_keys[i].mark, "could not find expected ':'") } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) } - // Remove the key from the stack. - parser.simple_keys[i].possible = false return true } +// max_flow_level limits the flow_level +const max_flow_level = 10000 + // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. 
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) // Increase the flow level. parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } return true } @@ -920,11 +918,16 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { if parser.flow_level > 0 { parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] } return true } +// max_indents limits the indents stack size +const max_indents = 10000 + // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. @@ -939,6 +942,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // indentation level. parser.indents = append(parser.indents, parser.indent) parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } // Create a token and insert it into the queue. token := yaml_token_t{ @@ -989,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { // Initialize the simple key stack. parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + parser.simple_keys_by_tok = make(map[int]int) + // A simple key is allowed at the beginning of the stream. parser.simple_key_allowed = true @@ -1270,7 +1280,11 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool { simple_key := &parser.simple_keys[len(parser.simple_keys)-1] // Have we found a simple key? - if simple_key.possible { + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + // Create the KEY token and insert it into the queue. token := yaml_token_t{ typ: yaml_KEY_TOKEN, @@ -1288,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool { // Remove the simple key. simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) // A simple key cannot follow another simple key. parser.simple_key_allowed = false diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index de85aa4c..89650e29 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -89,7 +89,7 @@ func UnmarshalStrict(in []byte, out interface{}) (err error) { return unmarshal(in, out, true) } -// A Decorder reads and decodes YAML values from an input stream. +// A Decoder reads and decodes YAML values from an input stream. 
type Decoder struct { strict bool parser *parser diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go index e25cee56..f6a9c8e3 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -579,6 +579,7 @@ type yaml_parser_t struct { simple_key_allowed bool // May a simple key occur at the current position? simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number // Parser stuff diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go new file mode 100644 index 00000000..c3940f09 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io + +// Package v1 is the v1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the +// new dynamic admission controller configuration. +package v1 // import "k8s.io/api/admissionregistration/v1" diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go new file mode 100644 index 00000000..1acb6345 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -0,0 +1,3469 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() 
int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo + +func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m 
*ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.RuleWithOperations") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhook") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfigurationList") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto", 
fileDescriptor_aaac5994f79683e8) +} + +var fileDescriptor_aaac5994f79683e8 = []byte{ + // 1104 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0x46, 0x42, + 0x46, 0xc0, 0x6e, 0x13, 0x4a, 0xa9, 0xb8, 0xa0, 0x6c, 0xf8, 0xa3, 0x88, 0xa4, 0x8d, 0x26, 0x6d, + 0x8a, 0x50, 0x0e, 0x1d, 0xaf, 0xc7, 0xf6, 0x10, 0x7b, 0x67, 0x35, 0x33, 0xeb, 0x92, 0x1b, 0x1f, + 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, + 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xb6, 0xf3, + 0x7b, 0xf3, 0x7e, 0x6f, 0xde, 0xdb, 0xf7, 0xde, 0x0f, 0xec, 0x1c, 0x3d, 0x12, 0x16, 0x65, 0xf6, + 0x51, 0xd0, 0x24, 0xdc, 0x23, 0x92, 0x08, 0x7b, 0x40, 0xbc, 0x16, 0xe3, 0x76, 0x62, 0xc0, 0x3e, + 0xb5, 0x71, 0xab, 0x4f, 0x85, 0xa0, 0xcc, 0xe3, 0xa4, 0x43, 0x85, 0xe4, 0x58, 0x52, 0xe6, 0xd9, + 0x83, 0x75, 0xbb, 0x43, 0x3c, 0xc2, 0xb1, 0x24, 0x2d, 0xcb, 0xe7, 0x4c, 0x32, 0xf8, 0x6e, 0xec, + 0x64, 0x61, 0x9f, 0x5a, 0x17, 0x3a, 0x59, 0x83, 0xf5, 0xb5, 0x8f, 0x3a, 0x54, 0x76, 0x83, 0xa6, + 0xe5, 0xb2, 0xbe, 0xdd, 0x61, 0x1d, 0x66, 0x6b, 0xdf, 0x66, 0xd0, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, + 0x31, 0xe7, 0xda, 0x83, 0xec, 0x21, 0x7d, 0xec, 0x76, 0xa9, 0x47, 0xf8, 0xb1, 0xed, 0x1f, 0x75, + 0x14, 0x20, 0xec, 0x3e, 0x91, 0xf8, 0x82, 0x97, 0xac, 0xd9, 0x97, 0x79, 0xf1, 0xc0, 0x93, 0xb4, + 0x4f, 0xa6, 0x1c, 0x1e, 0xfe, 0x9f, 0x83, 0x70, 0xbb, 0xa4, 0x8f, 0x27, 0xfd, 0xea, 0xbf, 0x2d, + 0x80, 0x95, 0xdd, 0x40, 0x62, 0x49, 0xbd, 0xce, 0x73, 0xd2, 0xec, 0x32, 0x76, 0x04, 0x6b, 0x20, + 0xef, 0xe1, 0x3e, 0xa9, 0x18, 0x35, 0xa3, 0x51, 0x72, 0x16, 0x4f, 0x42, 0x73, 0x2e, 0x0a, 0xcd, + 0xfc, 0x63, 0xdc, 0x27, 0x48, 0x5b, 0x20, 0x07, 0x8b, 0x6e, 0x8f, 0x12, 0x4f, 0x6e, 0x31, 0xaf, + 0x4d, 0x3b, 0x95, 0xf9, 0x9a, 0xd1, 0x28, 0x6f, 0x3c, 0xb2, 0x66, 0xa8, 0x9f, 0x95, 0x44, 0xd9, + 0x1a, 0xf1, 0x77, 0xde, 0x4c, 0x62, 0x2c, 0x8e, 0xa2, 0x68, 0x2c, 0x06, 0x3c, 0x04, 0x05, 0x1e, + 0xf4, 0x88, 0xa8, 0xe4, 0x6a, 0xb9, 0x46, 0x79, 0xe3, 0xd3, 0x99, 0x82, 0xa1, 0xa0, 0x47, 0x9e, + 0x53, 0xd9, 0x7d, 0xe2, 0x93, 0x18, 0x14, 0xce, 0x52, 0x12, 0xab, 0xa0, 0x6c, 0x02, 0xc5, 0xa4, + 0x70, 0x07, 0x2c, 0xb5, 0x31, 0xed, 0x05, 0x9c, 0xec, 0xb1, 0x1e, 0x75, 0x8f, 0x2b, 0x79, 0x9d, + 0xfc, 0x7b, 0x51, 0x68, 0x2e, 0x7d, 0x35, 0x6a, 0x38, 0x0f, 0xcd, 0xd5, 0x31, 0xe0, 0xe9, 0xb1, + 0x4f, 0xd0, 0xb8, 0x33, 0xfc, 0x02, 0x94, 0xfb, 0x58, 0xba, 0xdd, 0x84, 0xab, 0xa4, 0xb9, 0xea, + 0x51, 0x68, 0x96, 0x77, 0x33, 0xf8, 0x3c, 0x34, 0x57, 0x46, 0x8e, 0x9a, 0x67, 0xd4, 0x0d, 0xfe, + 0x00, 0x56, 0x55, 0xb5, 0x85, 0x8f, 0x5d, 0xb2, 0x4f, 0x7a, 0xc4, 0x95, 0x8c, 0x57, 0x0a, 0xba, + 0xd4, 0x1f, 0x8f, 0x64, 0x9f, 0xfe, 0x6f, 0xcb, 0x3f, 0xea, 0x28, 0x40, 0x58, 0xaa, 0xad, 0x54, + 0xfa, 0x3b, 0xb8, 0x49, 0x7a, 0x43, 0x57, 0xe7, 0xad, 0x28, 0x34, 0x57, 0x1f, 0x4f, 0x32, 0xa2, + 0xe9, 0x20, 0x90, 0x81, 0x65, 0xd6, 0xfc, 0x9e, 0xb8, 0x32, 0x0d, 0x5b, 0xbe, 0x7e, 0x58, 0x18, + 0x85, 0xe6, 0xf2, 0x93, 0x31, 0x3a, 0x34, 0x41, 0xaf, 0x0a, 0x26, 0x68, 0x8b, 0x7c, 0xd9, 0x6e, + 0x13, 0x57, 0x8a, 0xca, 0xad, 0xac, 0x60, 0xfb, 0x19, 0xac, 0x0a, 0x96, 0x1d, 0xb7, 0x7a, 0x58, + 0x08, 0x34, 0xea, 0x06, 0x3f, 0x03, 0xcb, 0xaa, 0xd7, 0x59, 0x20, 0xf7, 0x89, 0xcb, 0xbc, 0x96, + 0xa8, 0x2c, 0xd4, 0x8c, 0x46, 0x21, 0x7e, 0xc1, 0xd3, 0x31, 0x0b, 0x9a, 0xb8, 0x09, 0x9f, 0x81, + 0xbb, 0x69, 0x17, 0x21, 0x32, 0xa0, 0xe4, 0xe5, 0x01, 0xe1, 0xea, 0x20, 0x2a, 0xc5, 
0x5a, 0xae, + 0x51, 0x72, 0xde, 0x89, 0x42, 0xf3, 0xee, 0xe6, 0xc5, 0x57, 0xd0, 0x65, 0xbe, 0xf0, 0x05, 0x80, + 0x9c, 0x50, 0x6f, 0xc0, 0x5c, 0xdd, 0x7e, 0x49, 0x43, 0x00, 0x9d, 0xdf, 0xfd, 0x28, 0x34, 0x21, + 0x9a, 0xb2, 0x9e, 0x87, 0xe6, 0x9d, 0x69, 0x54, 0xb7, 0xc7, 0x05, 0x5c, 0xf5, 0x53, 0x03, 0xdc, + 0x9b, 0x98, 0xe0, 0x78, 0x62, 0x82, 0xb8, 0xe3, 0xe1, 0x0b, 0x50, 0x54, 0x3f, 0xa6, 0x85, 0x25, + 0xd6, 0x23, 0x5d, 0xde, 0xb8, 0x3f, 0xdb, 0x6f, 0x8c, 0xff, 0xd9, 0x2e, 0x91, 0xd8, 0x81, 0xc9, + 0xd0, 0x80, 0x0c, 0x43, 0x29, 0x2b, 0x3c, 0x00, 0xc5, 0x24, 0xb2, 0xa8, 0xcc, 0xeb, 0xe9, 0x7c, + 0x30, 0xd3, 0x74, 0x4e, 0x3c, 0xdb, 0xc9, 0xab, 0x28, 0xa8, 0xf8, 0x32, 0xe1, 0xaa, 0xff, 0x63, + 0x80, 0xda, 0x55, 0xa9, 0xed, 0x50, 0x21, 0xe1, 0xe1, 0x54, 0x7a, 0xd6, 0x8c, 0x5d, 0x4a, 0x45, + 0x9c, 0xdc, 0xed, 0x24, 0xb9, 0xe2, 0x10, 0x19, 0x49, 0xad, 0x0d, 0x0a, 0x54, 0x92, 0xfe, 0x30, + 0xaf, 0xcd, 0xeb, 0xe4, 0x35, 0xf6, 0xe6, 0x6c, 0xff, 0x6c, 0x2b, 0x5e, 0x14, 0xd3, 0xd7, 0x7f, + 0x37, 0x40, 0x5e, 0x2d, 0x24, 0xf8, 0x01, 0x28, 0x61, 0x9f, 0x7e, 0xcd, 0x59, 0xe0, 0x8b, 0x8a, + 0xa1, 0x3b, 0x6f, 0x29, 0x0a, 0xcd, 0xd2, 0xe6, 0xde, 0x76, 0x0c, 0xa2, 0xcc, 0x0e, 0xd7, 0x41, + 0x19, 0xfb, 0x34, 0x6d, 0xd4, 0x79, 0x7d, 0x7d, 0x45, 0x8d, 0xcd, 0xe6, 0xde, 0x76, 0xda, 0x9c, + 0xa3, 0x77, 0x14, 0x3f, 0x27, 0x82, 0x05, 0xdc, 0x4d, 0x56, 0x69, 0xc2, 0x8f, 0x86, 0x20, 0xca, + 0xec, 0xf0, 0x43, 0x50, 0x10, 0x2e, 0xf3, 0x49, 0xb2, 0x0d, 0xef, 0xa8, 0x67, 0xef, 0x2b, 0xe0, + 0x3c, 0x34, 0x4b, 0xfa, 0x43, 0xb7, 0x65, 0x7c, 0xa9, 0xfe, 0x8b, 0x01, 0xe0, 0xf4, 0xc2, 0x85, + 0x9f, 0x03, 0xc0, 0xd2, 0x53, 0x92, 0x92, 0xa9, 0x7b, 0x29, 0x45, 0xcf, 0x43, 0x73, 0x29, 0x3d, + 0x69, 0xca, 0x11, 0x17, 0xf8, 0x0d, 0xc8, 0xab, 0x25, 0x9d, 0xa8, 0xcc, 0xfb, 0x33, 0x2f, 0xfe, + 0x4c, 0xba, 0xd4, 0x09, 0x69, 0x92, 0xfa, 0xcf, 0x06, 0xb8, 0xbd, 0x4f, 0xf8, 0x80, 0xba, 0x04, + 0x91, 0x36, 0xe1, 0xc4, 0x73, 0x09, 0xb4, 0x41, 0x29, 0x5d, 0x82, 0x89, 0xec, 0xad, 0x26, 0xbe, + 0xa5, 0x74, 0x61, 0xa2, 0xec, 0x4e, 0x2a, 0x91, 0xf3, 0x97, 0x4a, 0xe4, 0x3d, 0x90, 0xf7, 0xb1, + 0xec, 0x56, 0x72, 0xfa, 0x46, 0x51, 0x59, 0xf7, 0xb0, 0xec, 0x22, 0x8d, 0x6a, 0x2b, 0xe3, 0x52, + 0xd7, 0xb5, 0x90, 0x58, 0x19, 0x97, 0x48, 0xa3, 0xf5, 0x3f, 0x6f, 0x81, 0xd5, 0x03, 0xdc, 0xa3, + 0xad, 0x1b, 0x59, 0xbe, 0x91, 0xe5, 0x2b, 0x65, 0x19, 0xdc, 0xc8, 0xf2, 0x75, 0x64, 0xb9, 0xfe, + 0x87, 0x01, 0xaa, 0x53, 0x13, 0xf6, 0xba, 0x65, 0xf3, 0xdb, 0x29, 0xd9, 0x7c, 0x38, 0xd3, 0xf4, + 0x4c, 0x3d, 0x7c, 0x4a, 0x38, 0xff, 0x35, 0x40, 0xfd, 0xea, 0xf4, 0x5e, 0x83, 0x74, 0x76, 0xc7, + 0xa5, 0x73, 0xeb, 0x7a, 0xb9, 0xcd, 0x22, 0x9e, 0xbf, 0x1a, 0xe0, 0x8d, 0x0b, 0xf6, 0x17, 0x7c, + 0x1b, 0xe4, 0x02, 0xde, 0x4b, 0x56, 0xf0, 0x42, 0x14, 0x9a, 0xb9, 0x67, 0x68, 0x07, 0x29, 0x0c, + 0x1e, 0x82, 0x05, 0x11, 0xab, 0x40, 0x92, 0xf9, 0x27, 0x33, 0x3d, 0x6f, 0x52, 0x39, 0x9c, 0x72, + 0x14, 0x9a, 0x0b, 0x43, 0x74, 0x48, 0x09, 0x1b, 0xa0, 0xe8, 0x62, 0x27, 0xf0, 0x5a, 0x89, 0x6a, + 0x2d, 0x3a, 0x8b, 0xaa, 0x48, 0x5b, 0x9b, 0x31, 0x86, 0x52, 0xab, 0xd3, 0x38, 0x39, 0xab, 0xce, + 0xbd, 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, + 0x45, 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xcd, + 0x0f, 0xd6, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x57, 0x91, 0x2c, 0x7b, 0xeb, 0x0e, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIVersions) > 0 { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) 
{ + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} 
+ +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, 
``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := 
strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := 
m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto new file mode 100644 index 00000000..f102f3a7 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -0,0 +1,479 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. 
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. 
+ // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated MutatingWebhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. 
+message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + optional string scope = 4; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. 
+ optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. 
objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. 
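The namespaceSelector JSON shown in the comments above maps one-to-one onto the metav1.LabelSelector Go type. A minimal sketch of the documented "runlevel NotIn {0, 1}" selector:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Matches objects whose namespace is NOT labeled runlevel=0 or runlevel=1,
	// mirroring the "namespaceSelector" JSON example in the field documentation.
	selector := metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "runlevel",
			Operator: metav1.LabelSelectorOpNotIn,
			Values:   []string{"0", "1"},
		}},
	}
	fmt.Printf("%+v\n", selector)
}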
+ // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1/register.go b/vendor/k8s.io/api/admissionregistration/v1/register.go new file mode 100644 index 00000000..716ce7fc --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. 
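WebhookClientConfig admits exactly one of `url` or `service`. The following sketch shows both forms side by side; the host, service name, and path are placeholders:

package main

import (
	"fmt"

	admissionregv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	// URL-based config: the scheme must be https, and user info, query
	// parameters, and fragments are not allowed. The host is a placeholder
	// for an out-of-cluster endpoint.
	url := "https://webhook.example.com:8443/validate"
	byURL := admissionregv1.WebhookClientConfig{URL: &url}

	// Service-based config: preferred when the webhook runs inside the cluster.
	path := "/validate"
	port := int32(8443) // defaults to 443 when omitted
	byService := admissionregv1.WebhookClientConfig{
		Service: &admissionregv1.ServiceReference{
			Namespace: "default",
			Name:      "example-webhook", // placeholder service name
			Path:      &path,
			Port:      &port,
		},
		// CABundle would carry the PEM-encoded CA used to verify the serving cert.
	}

	fmt.Println(*byURL.URL, byService.Service.Name)
}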
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go new file mode 100644 index 00000000..114a4c68 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -0,0 +1,551 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` +} + +type ScopeType string + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = "Cluster" + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = "Namespaced" + // AllScopes means that all scopes are included. 
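register.go above is consumed through AddToScheme. A short sketch of how a caller would typically register these kinds with a runtime.Scheme and look one of them up afterwards:

package main

import (
	"fmt"

	admissionregv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()

	// AddToScheme runs addKnownTypes and registers the group version
	// admissionregistration.k8s.io/v1 with the scheme.
	if err := admissionregv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	gvks, _, err := scheme.ObjectKinds(&admissionregv1.ValidatingWebhookConfiguration{})
	if err != nil {
		panic(err)
	}
	// Prints: admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration
	fmt.Println(gvks[0].String())
}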
+ AllScopes ScopeType = "*" +) + +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + +type SideEffectClass string + +const ( + // SideEffectClassUnknown means that no information is known about the side effects of calling the webhook. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassUnknown SideEffectClass = "Unknown" + // SideEffectClassNone means that calling the webhook will have no side effects. + SideEffectClassNone SideEffectClass = "None" + // SideEffectClassSome means that calling the webhook will possibly have side effects. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassSome SideEffectClass = "Some" + // SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the + // request being reviewed has the dry-run attribute, the side effects will be suppressed. + SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. 
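Tying the constants above to the ValidatingWebhookConfiguration type just defined, here is an illustrative configuration that fails open and only runs on opt-in objects; all names and label keys below are placeholders:

package main

import (
	"fmt"

	admissionregv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	failOpen := admissionregv1.Ignore        // errors calling the endpoint are ignored
	matchPolicy := admissionregv1.Equivalent // also match equivalent group/versions
	sideEffects := admissionregv1.SideEffectClassNone

	cfg := admissionregv1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-validator"}, // placeholder name
		Webhooks: []admissionregv1.ValidatingWebhook{{
			Name: "example.validator.k8s.io", // must be fully qualified
			ClientConfig: admissionregv1.WebhookClientConfig{
				Service: &admissionregv1.ServiceReference{Namespace: "default", Name: "example-webhook"},
			},
			Rules: []admissionregv1.RuleWithOperations{{
				Operations: []admissionregv1.OperationType{admissionregv1.Create, admissionregv1.Update},
				Rule: admissionregv1.Rule{
					APIGroups:   []string{"apps"},
					APIVersions: []string{"v1"},
					Resources:   []string{"deployments"},
				},
			}},
			FailurePolicy: &failOpen,
			MatchPolicy:   &matchPolicy,
			// Only objects carrying this (placeholder) opt-in label reach the webhook.
			ObjectSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"example-webhook": "enabled"},
			},
			SideEffects:             &sideEffects,
			AdmissionReviewVersions: []string{"v1"},
		}},
	}
	fmt.Println(cfg.Webhooks[0].Name)
}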
+type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. + Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. 
+ // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
+ // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. 
API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
+const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go new file mode 100644 index 00000000..2fde0ce3 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. 
* if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. 
Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. 
If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..3afb7467 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -0,0 +1,396 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
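The generated SwaggerDoc maps above are keyed by JSON field name, with the empty key holding the type-level description. A tiny sketch of looking up a field's documentation:

package main

import (
	"fmt"

	admissionregv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	// SwaggerDoc has a value receiver, so it can be called on a zero value.
	docs := admissionregv1.MutatingWebhook{}.SwaggerDoc()
	fmt.Println(docs[""])                   // type-level description
	fmt.Println(docs["reinvocationPolicy"]) // per-field description
}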
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]MutatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. +func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. 
+func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]ValidatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 1114e29a..d84d8b63 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -17,35 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto - - It has these top-level messages: - MutatingWebhookConfiguration - MutatingWebhookConfigurationList - Rule - RuleWithOperations - ServiceReference - ValidatingWebhookConfiguration - ValidatingWebhookConfigurationList - Webhook - WebhookClientConfig -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -58,141 +44,593 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_abeea74cbc46f55a, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) } +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) } -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func 
(*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_abeea74cbc46f55a, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m *ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() } +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_abeea74cbc46f55a, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) } -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), 
"k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") - proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) +} + +var fileDescriptor_abeea74cbc46f55a = []byte{ + // 1114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x23, 0x45, + 0x13, 0xce, 0xc4, 0xf6, 0xda, 0x6e, 0x27, 0xbb, 0x9b, 0x7e, 0x5f, 0x76, 0x4d, 0x58, 0x79, 0x2c, + 0x1f, 0x90, 0x25, 0xd8, 0x99, 0x4d, 0x40, 0x08, 0x16, 0x10, 0x8a, 0x03, 0x0b, 0x91, 0x92, 0xdd, + 0xd0, 0xd9, 0x0f, 0x89, 0x0f, 0x69, 0xdb, 0xe3, 0xb2, 0xdd, 0xd8, 0x9e, 0x1e, 0x4d, 0xf7, 0x38, + 0xe4, 0xc6, 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, + 0x32, 0x9c, 0x38, 0x70, 0xe0, 0x9a, 0x13, 0x9a, 0x9e, 0xf6, 0xf8, 0x2b, 0x59, 0x4c, 0x90, 0xf6, + 0x94, 0xdb, 0xf4, 0x53, 0x5d, 0x4f, 0x75, 0xd5, 0x54, 0xd5, 0x83, 0x3e, 0xef, 0xbd, 0x2b, 0x2c, + 0xc6, 0xed, 0x5e, 0xd0, 0x04, 0xdf, 0x05, 0x09, 0xc2, 0x1e, 0x82, 0xdb, 0xe2, 0xbe, 0xad, 0x0d, + 0xd4, 0x63, 0x36, 0x6d, 0x0d, 0x98, 0x10, 0x8c, 0xbb, 0x3e, 0x74, 0x98, 0x90, 0x3e, 0x95, 0x8c, + 0xbb, 0xf6, 0x70, 0xa3, 0x09, 0x92, 0x6e, 0xd8, 0x1d, 0x70, 0xc1, 0xa7, 0x12, 0x5a, 0x96, 0xe7, + 0x73, 0xc9, 0x71, 0x3d, 0xf1, 0xb4, 0xa8, 0xc7, 0xac, 0x33, 0x3d, 0x2d, 0xed, 0xb9, 0x7e, 0xbb, + 0xc3, 0x64, 0x37, 0x68, 0x5a, 0x0e, 0x1f, 0xd8, 0x1d, 0xde, 0xe1, 0xb6, 0x22, 0x68, 0x06, 0x6d, + 0x75, 0x52, 0x07, 0xf5, 0x95, 0x10, 0xaf, 0xbf, 0x3d, 0x7e, 0xd2, 0x80, 0x3a, 0x5d, 0xe6, 0x82, + 0x7f, 0x64, 0x7b, 0xbd, 0x4e, 0x0c, 0x08, 0x7b, 0x00, 0x92, 0xda, 0xc3, 0xb9, 0xe7, 0xac, 0xdb, + 0xe7, 0x79, 0xf9, 0x81, 0x2b, 0xd9, 0x00, 0xe6, 0x1c, 0xde, 0xf9, 0x27, 0x07, 0xe1, 0x74, 0x61, + 0x40, 0x67, 0xfd, 0x6a, 0xbf, 0xe4, 0xd1, 0xb5, 0xbd, 0x40, 0x52, 0xc9, 0xdc, 0xce, 0x13, 0x68, + 0x76, 0x39, 0xef, 0xe1, 0x2a, 0xca, 0xba, 0x74, 0x00, 0x65, 0xa3, 0x6a, 0xd4, 0x8b, 0x8d, 0x95, + 0xe3, 0xd0, 0x5c, 0x8a, 0x42, 0x33, 0x7b, 0x9f, 0x0e, 0x80, 0x28, 0x0b, 0x3e, 0x44, 0x2b, 0x4e, + 0x9f, 0x81, 0x2b, 0xb7, 0xb9, 0xdb, 0x66, 0x9d, 0xf2, 0x72, 0xd5, 0xa8, 0x97, 0x36, 0x3f, 0xb4, + 0x16, 0x2d, 0xa2, 0xa5, 0x43, 0x6d, 0x4f, 0x90, 0x34, 0xfe, 0xaf, 0x03, 0xad, 0x4c, 0xa2, 0x64, + 0x2a, 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x7d, 0x10, 0xe5, 0x4c, 0x35, 0x53, 0x2f, 0x6d, 0x7e, 0xb0, + 0x78, 0x44, 0x12, 0xf4, 0xe1, 0x09, 0x93, 0xdd, 0x07, 0x1e, 
0x24, 0x16, 0xd1, 0x58, 0xd5, 0x01, + 0x73, 0xb1, 0x4d, 0x90, 0x84, 0x19, 0xef, 0xa2, 0xd5, 0x36, 0x65, 0xfd, 0xc0, 0x87, 0x7d, 0xde, + 0x67, 0xce, 0x51, 0x39, 0xab, 0xca, 0xf0, 0x7a, 0x14, 0x9a, 0xab, 0xf7, 0x26, 0x0d, 0xa7, 0xa1, + 0xb9, 0x36, 0x05, 0x3c, 0x3c, 0xf2, 0x80, 0x4c, 0x3b, 0xe3, 0x8f, 0x51, 0x69, 0x40, 0xa5, 0xd3, + 0xd5, 0x5c, 0x45, 0xc5, 0x55, 0x8b, 0x42, 0xb3, 0xb4, 0x37, 0x86, 0x4f, 0x43, 0xf3, 0xda, 0xc4, + 0x51, 0xf1, 0x4c, 0xba, 0xe1, 0x6f, 0xd1, 0x5a, 0x5c, 0x77, 0xe1, 0x51, 0x07, 0x0e, 0xa0, 0x0f, + 0x8e, 0xe4, 0x7e, 0x39, 0xa7, 0x8a, 0xfe, 0xd6, 0x44, 0x09, 0xd2, 0x3f, 0x6f, 0x79, 0xbd, 0x4e, + 0x0c, 0x08, 0x2b, 0x6e, 0x30, 0x6b, 0xb8, 0x61, 0xed, 0xd2, 0x26, 0xf4, 0x47, 0xae, 0x8d, 0x57, + 0xa2, 0xd0, 0x5c, 0xbb, 0x3f, 0xcb, 0x48, 0xe6, 0x83, 0x60, 0x8e, 0xae, 0xf2, 0xe6, 0x37, 0xe0, + 0xc8, 0x34, 0x6c, 0xe9, 0xe2, 0x61, 0x71, 0x14, 0x9a, 0x57, 0x1f, 0x4c, 0xd1, 0x91, 0x19, 0xfa, + 0xb8, 0x60, 0x82, 0xb5, 0xe0, 0x93, 0x76, 0x1b, 0x1c, 0x29, 0xca, 0x57, 0xc6, 0x05, 0x3b, 0x18, + 0xc3, 0x71, 0xc1, 0xc6, 0xc7, 0xed, 0x3e, 0x15, 0x82, 0x4c, 0xba, 0xe1, 0xbb, 0xe8, 0x6a, 0xdc, + 0xf5, 0x3c, 0x90, 0x07, 0xe0, 0x70, 0xb7, 0x25, 0xca, 0xf9, 0xaa, 0x51, 0xcf, 0x25, 0x2f, 0x78, + 0x38, 0x65, 0x21, 0x33, 0x37, 0xf1, 0x23, 0x74, 0x33, 0x6d, 0x25, 0x02, 0x43, 0x06, 0x87, 0x8f, + 0xc1, 0x8f, 0x0f, 0xa2, 0x5c, 0xa8, 0x66, 0xea, 0xc5, 0xc6, 0x6b, 0x51, 0x68, 0xde, 0xdc, 0x3a, + 0xfb, 0x0a, 0x39, 0xcf, 0x17, 0x3f, 0x45, 0xd8, 0x07, 0xe6, 0x0e, 0xb9, 0xa3, 0xda, 0x4f, 0x37, + 0x04, 0x52, 0xf9, 0xdd, 0x89, 0x42, 0x13, 0x93, 0x39, 0xeb, 0x69, 0x68, 0xde, 0x98, 0x47, 0x55, + 0x7b, 0x9c, 0xc1, 0x55, 0xfb, 0xd5, 0x40, 0xb7, 0x66, 0x66, 0x39, 0x19, 0x9b, 0x20, 0xe9, 0x78, + 0xfc, 0x14, 0x15, 0xe2, 0x1f, 0xd3, 0xa2, 0x92, 0xaa, 0xe1, 0x2e, 0x6d, 0xde, 0x59, 0xec, 0x37, + 0x26, 0xff, 0x6c, 0x0f, 0x24, 0x6d, 0x60, 0x3d, 0x34, 0x68, 0x8c, 0x91, 0x94, 0x15, 0x7f, 0x89, + 0x0a, 0x3a, 0xb2, 0x28, 0x2f, 0xab, 0x11, 0x7d, 0x6f, 0xf1, 0x11, 0x9d, 0x79, 0x7b, 0x23, 0x1b, + 0x87, 0x22, 0x85, 0x43, 0x4d, 0x58, 0xfb, 0xd3, 0x40, 0xd5, 0x17, 0xe5, 0xb7, 0xcb, 0x84, 0xc4, + 0x5f, 0xcd, 0xe5, 0x68, 0x2d, 0xd8, 0xaa, 0x4c, 0x24, 0x19, 0x5e, 0xd7, 0x19, 0x16, 0x46, 0xc8, + 0x44, 0x7e, 0x3d, 0x94, 0x63, 0x12, 0x06, 0xa3, 0xe4, 0xee, 0x5d, 0x38, 0xb9, 0xa9, 0x87, 0x8f, + 0x37, 0xd1, 0x4e, 0x4c, 0x4e, 0x92, 0x18, 0xb5, 0x9f, 0x0d, 0x94, 0x8d, 0x57, 0x13, 0x7e, 0x03, + 0x15, 0xa9, 0xc7, 0x3e, 0xf5, 0x79, 0xe0, 0x89, 0xb2, 0xa1, 0x7a, 0x70, 0x35, 0x0a, 0xcd, 0xe2, + 0xd6, 0xfe, 0x4e, 0x02, 0x92, 0xb1, 0x1d, 0x6f, 0xa0, 0x12, 0xf5, 0x58, 0xda, 0xb2, 0xcb, 0xea, + 0xfa, 0xb5, 0x78, 0x80, 0xb6, 0xf6, 0x77, 0xd2, 0x36, 0x9d, 0xbc, 0x13, 0xf3, 0xfb, 0x20, 0x78, + 0xe0, 0x3b, 0x7a, 0xb3, 0x6a, 0x7e, 0x32, 0x02, 0xc9, 0xd8, 0x8e, 0xdf, 0x44, 0x39, 0xe1, 0x70, + 0x0f, 0xf4, 0x5e, 0xbc, 0x11, 0x3f, 0xfb, 0x20, 0x06, 0x4e, 0x43, 0xb3, 0xa8, 0x3e, 0x54, 0x83, + 0x26, 0x97, 0x6a, 0x3f, 0x1a, 0x08, 0xcf, 0xaf, 0x5e, 0xfc, 0x11, 0x42, 0x3c, 0x3d, 0xe9, 0x94, + 0x4c, 0xd5, 0x55, 0x29, 0x7a, 0x1a, 0x9a, 0xab, 0xe9, 0x49, 0x51, 0x4e, 0xb8, 0xe0, 0x7d, 0x94, + 0x8d, 0xd7, 0xb5, 0x56, 0x1e, 0xeb, 0xdf, 0xe9, 0xc0, 0x58, 0xd3, 0xe2, 0x13, 0x51, 0x4c, 0xb5, + 0x1f, 0x0c, 0x74, 0xfd, 0x00, 0xfc, 0x21, 0x73, 0x80, 0x40, 0x1b, 0x7c, 0x70, 0x1d, 0xc0, 0x36, + 0x2a, 0xa6, 0x3b, 0x51, 0xeb, 0xe1, 0x9a, 0xf6, 0x2d, 0xa6, 0xfb, 0x93, 0x8c, 0xef, 0xa4, 0xda, + 0xb9, 0x7c, 0xae, 0x76, 0xde, 0x42, 0x59, 0x8f, 0xca, 0x6e, 0x39, 0xa3, 0x6e, 0x14, 0x62, 0xeb, + 0x3e, 0x95, 0x5d, 0xa2, 0x50, 0x65, 0xe5, 0xbe, 0x54, 0xc5, 0xcd, 0x69, 0x2b, 0xf7, 
0x25, 0x51, + 0x68, 0xed, 0x8f, 0x2b, 0x68, 0xed, 0x31, 0xed, 0xb3, 0xd6, 0xa5, 0x5e, 0x5f, 0xea, 0xf5, 0x82, + 0x7a, 0x8d, 0x2e, 0xf5, 0xfa, 0x22, 0x7a, 0x5d, 0x3b, 0x31, 0x50, 0x65, 0x6e, 0xd6, 0x5e, 0xb6, + 0x9e, 0x7e, 0x3d, 0xa7, 0xa7, 0xef, 0x2f, 0x3e, 0x42, 0x73, 0xaf, 0x9f, 0x53, 0xd4, 0xbf, 0x0c, + 0x54, 0x7b, 0x71, 0x8e, 0x2f, 0x41, 0x53, 0x07, 0xd3, 0x9a, 0xfa, 0xd9, 0x7f, 0x48, 0x70, 0x11, + 0x55, 0xfd, 0xc9, 0x40, 0xff, 0x3b, 0x63, 0x9d, 0xe1, 0x57, 0x51, 0x26, 0xf0, 0xfb, 0x7a, 0x2d, + 0xe7, 0xa3, 0xd0, 0xcc, 0x3c, 0x22, 0xbb, 0x24, 0xc6, 0x30, 0x45, 0x79, 0x91, 0x28, 0x83, 0x4e, + 0xff, 0xee, 0xe2, 0x6f, 0x9c, 0x95, 0x94, 0x46, 0x29, 0x0a, 0xcd, 0xfc, 0x08, 0x1d, 0xf1, 0xe2, + 0x3a, 0x2a, 0x38, 0xb4, 0x11, 0xb8, 0x2d, 0xad, 0x69, 0x2b, 0x8d, 0x95, 0xb8, 0x5c, 0xdb, 0x5b, + 0x09, 0x46, 0x52, 0x6b, 0xe3, 0xf6, 0xf1, 0x49, 0x65, 0xe9, 0xd9, 0x49, 0x65, 0xe9, 0xf9, 0x49, + 0x65, 0xe9, 0xbb, 0xa8, 0x62, 0x1c, 0x47, 0x15, 0xe3, 0x59, 0x54, 0x31, 0x9e, 0x47, 0x15, 0xe3, + 0xb7, 0xa8, 0x62, 0x7c, 0xff, 0x7b, 0x65, 0xe9, 0x8b, 0xbc, 0x8e, 0xff, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xae, 0x27, 0x2b, 0xcb, 0x2c, 0x0f, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } - i += n1 - if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n2 + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -200,68 +638,56 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { } func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a } } if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa } } - if m.Scope != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i += copy(dAtA[i:], *m.Scope) - } - return i, nil + return len(dAtA) - i, nil } func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -269,40 +695,41 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { } func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n3, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += 
n3 - return i, nil + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -310,191 +737,249 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } if m.Path != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - i += n4 - if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != 
nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Webhook) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n6, err := 
m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 - } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) - } - if m.TimeoutSeconds != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - dAtA[i] = 0x42 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -502,45 +987,111 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } if m.CABundle != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + i-- + dAtA[i] = 0x12 } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 
+ } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } + func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -555,6 +1106,9 @@ func (m *MutatingWebhookConfiguration) Size() (n int) { } func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -569,6 +1123,9 @@ func (m *MutatingWebhookConfigurationList) Size() (n int) { } func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.APIGroups) > 0 { @@ -597,6 +1154,9 @@ func (m *Rule) Size() (n int) { } func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Operations) > 0 { @@ -611,6 +1171,9 @@ func (m *RuleWithOperations) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -621,38 +1184,16 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *ValidatingWebhookConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) } return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 } - return n -} - -func (m *Webhook) Size() (n int) { var l int _ = l l = len(m.Name) @@ -686,10 +1227,55 @@ func (m *Webhook) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Service != nil { @@ -708,36 +1294,64 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *MutatingWebhookConfiguration) String() string { +func (this *MutatingWebhook) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s } -func (this *MutatingWebhookConfigurationList) String() string { +func (this *MutatingWebhookConfiguration) String() string { if this == nil { return "nil" } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + 
"," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -774,45 +1388,63 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfiguration) String() string { +func (this *ValidatingWebhook) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfigurationList) String() string { +func (this *ValidatingWebhookConfiguration) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, `}`, }, "") return s } -func (this *Webhook) String() string { +func (this *ValidatingWebhookConfigurationList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Webhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -822,7 +1454,7 @@ func (this *WebhookClientConfig) String() string { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -837,7 +1469,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -852,7 +1484,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -860,15 +1492,47 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -880,7 +1544,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -889,16 +1553,19 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -910,7 +1577,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -919,69 +1586,144 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } - if skippy < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy 
= &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } - if iNdEx >= l { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) } - var msglen int + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -991,25 +1733,636 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1021,7 +2374,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1030,11 +2383,13 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1047,6 +2402,9 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1059,7 +2417,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA 
[]byte) error { } return nil } -func (m *Rule) Unmarshal(dAtA []byte) error { +func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1074,7 +2432,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1082,15 +2440,15 @@ func (m *Rule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1102,7 +2460,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1112,14 +2470,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1131,7 +2492,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1141,14 +2502,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1160,7 +2524,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1170,16 +2534,20 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - 
var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1189,22 +2557,12 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s - iNdEx = postIndex + m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1214,6 +2572,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1226,7 +2587,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1241,7 +2602,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1249,15 +2610,15 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1269,7 +2630,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,14 +2640,17 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1298,7 +2662,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1307,66 +2671,53 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1378,7 +2729,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1388,16 +2739,20 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1407,24 +2762,31 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.NamespaceSelector == nil { + 
m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1436,7 +2798,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1446,67 +2808,72 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1516,25 +2883,28 @@ 
func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1546,7 +2916,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1555,11 +2925,16 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1572,6 +2947,9 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1584,7 +2962,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1599,7 +2977,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1607,15 +2985,15 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1627,7 +3005,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -1636,16 +3014,19 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1657,7 +3038,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1666,11 +3047,14 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1683,6 +3067,9 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1695,7 +3082,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Webhook) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1710,7 +3097,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1718,44 +3105,15 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1767,7 +3125,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1776,77 +3134,19 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1858,7 +3158,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1867,94 +3167,16 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := 
SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -1965,6 +3187,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1992,7 +3217,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2020,7 +3245,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2029,6 +3254,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2053,7 +3281,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2062,6 +3290,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2084,7 +3315,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2094,6 +3325,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2109,6 +3343,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2175,10 +3412,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += 
length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2207,6 +3447,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2225,73 +3468,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 989 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x55, 0x4f, 0x6f, 0xe3, 0x44, - 0x14, 0xaf, 0x37, 0x09, 0x49, 0x26, 0xed, 0xee, 0x76, 0xf8, 0xb3, 0xa1, 0xac, 0xec, 0x28, 0x07, - 0x14, 0x09, 0xd6, 0xa6, 0x05, 0x21, 0xb4, 0x02, 0xa1, 0xba, 0xb0, 0x50, 0xa9, 0xbb, 0x5b, 0x26, - 0xfb, 0x47, 0x42, 0x1c, 0x98, 0x38, 0x2f, 0xc9, 0x10, 0xc7, 0x63, 0x79, 0xc6, 0x29, 0xbd, 0x21, - 0xf1, 0x05, 0xf8, 0x16, 0xf0, 0x25, 0x38, 0x70, 0xeb, 0x71, 0x2f, 0x88, 0x3d, 0x59, 0xd4, 0x9c, - 0x39, 0x70, 0xed, 0x09, 0x8d, 0xed, 0xd8, 0x49, 0xd3, 0x76, 0xb3, 0x17, 0x0e, 0xdc, 0x3c, 0xbf, - 0xf7, 0x7e, 0xef, 0xbd, 0xdf, 0xcc, 0x7b, 0xcf, 0xe8, 0xab, 0xf1, 0x47, 0xc2, 0x64, 0xdc, 0x1a, - 0x87, 0x3d, 0x08, 0x3c, 0x90, 0x20, 0xac, 0x29, 0x78, 0x7d, 0x1e, 0x58, 0x99, 0x81, 0xfa, 0xcc, - 0xa2, 0xfd, 0x09, 0x13, 0x82, 0x71, 0x2f, 0x80, 0x21, 0x13, 0x32, 0xa0, 0x92, 0x71, 0xcf, 0x9a, - 0x6e, 0xf7, 0x40, 0xd2, 0x6d, 0x6b, 0x08, 0x1e, 0x04, 0x54, 0x42, 0xdf, 0xf4, 0x03, 0x2e, 0x39, - 0xee, 0xa4, 0x4c, 0x93, 0xfa, 0xcc, 0xbc, 0x90, 0x69, 0x66, 0xcc, 0xad, 0x3b, 0x43, 0x26, 0x47, - 0x61, 0xcf, 0x74, 0xf8, 0xc4, 0x1a, 0xf2, 0x21, 0xb7, 0x92, 0x00, 0xbd, 0x70, 0x90, 0x9c, 0x92, - 0x43, 0xf2, 0x95, 0x06, 0xde, 0xfa, 0xa0, 0x28, 0x69, 0x42, 0x9d, 0x11, 0xf3, 0x20, 0x38, 0xb6, - 0xfc, 0xf1, 0x50, 0x01, 0xc2, 0x9a, 0x80, 0xa4, 0xd6, 0x74, 0xa9, 0x9c, 0x2d, 0xeb, 0x32, 0x56, - 0x10, 0x7a, 0x92, 0x4d, 0x60, 0x89, 0xf0, 0xe1, 0x8b, 0x08, 0xc2, 0x19, 0xc1, 0x84, 0x9e, 0xe7, - 0xb5, 0x7f, 0xd7, 0xd0, 0xed, 0xfb, 0xa1, 0xa4, 0x92, 0x79, 0xc3, 0xa7, 0xd0, 0x1b, 0x71, 0x3e, - 0xde, 0xe3, 0xde, 0x80, 0x0d, 0xc3, 0x54, 0x36, 0xfe, 0x16, 0xd5, 0x54, 0x91, 0x7d, 0x2a, 0x69, - 0x53, 0x6b, 0x69, 0x9d, 0xc6, 0xce, 0x7b, 0x66, 0x71, 0x57, 0x79, 0x2e, 0xd3, 0x1f, 0x0f, 0x15, - 0x20, 0x4c, 0xe5, 0x6d, 0x4e, 0xb7, 0xcd, 0x87, 0xbd, 0xef, 0xc0, 0x91, 0xf7, 0x41, 0x52, 0x1b, - 0x9f, 0x44, 0xc6, 0x5a, 0x1c, 0x19, 0xa8, 0xc0, 0x48, 0x1e, 0x15, 0x77, 0x51, 0x2d, 0xcb, 0x2c, - 0x9a, 0xd7, 0x5a, 0xa5, 0x4e, 0x63, 0x67, 0xdb, 0x5c, 0xf5, 0x35, 0xcc, 0x8c, 0x69, 0x97, 0x55, - 0x0a, 0x52, 0x3b, 0xca, 0x02, 0xb5, 0xff, 0xd6, 0x50, 0xeb, 0x2a, 0x5d, 0x07, 0x4c, 0x48, 0xfc, - 0xcd, 0x92, 0x36, 0x73, 0x35, 0x6d, 0x8a, 0x9d, 0x28, 0xbb, 0x99, 0x29, 0xab, 0xcd, 0x90, 0x39, - 0x5d, 0x63, 0x54, 0x61, 0x12, 0x26, 0x33, 0x51, 0xf7, 0x56, 0x17, 0x75, 0x55, 0xe1, 0xf6, 0x46, - 0x96, 0xb2, 0xb2, 0xaf, 0x82, 0x93, 0x34, 0x47, 0xfb, 0x37, 0x0d, 0x95, 0x49, 0xe8, 0x02, 0x7e, - 0x07, 0xd5, 0xa9, 0xcf, 0xbe, 0x08, 0x78, 0xe8, 0x8b, 0xa6, 0xd6, 0x2a, 0x75, 0xea, 0xf6, 0x46, - 0x1c, 0x19, 0xf5, 0xdd, 0xc3, 0xfd, 0x14, 0x24, 0x85, 0x1d, 0x6f, 0xa3, 0x06, 0xf5, 0xd9, 0x13, - 0x08, 0x54, 0x29, 0x69, 0xa1, 0x75, 0xfb, 0x46, 0x1c, 0x19, 0x8d, 0xdd, 0xc3, 0xfd, 0x19, 0x4c, - 0xe6, 0x7d, 
0x54, 0xfc, 0x00, 0x04, 0x0f, 0x03, 0x07, 0x44, 0xb3, 0x54, 0xc4, 0x27, 0x33, 0x90, - 0x14, 0x76, 0xfc, 0x2e, 0xaa, 0x08, 0x87, 0xfb, 0xd0, 0x2c, 0xb7, 0xb4, 0x4e, 0xdd, 0x7e, 0x43, - 0x95, 0xdd, 0x55, 0xc0, 0x59, 0x64, 0xd4, 0x93, 0x8f, 0x47, 0xc7, 0x3e, 0x90, 0xd4, 0xa9, 0xfd, - 0xb3, 0x86, 0xb0, 0xd2, 0xf0, 0x94, 0xc9, 0xd1, 0x43, 0x1f, 0x52, 0xbd, 0x02, 0x7f, 0x8a, 0x10, - 0xcf, 0x4f, 0x99, 0x24, 0x23, 0xe9, 0xa6, 0x1c, 0x3d, 0x8b, 0x8c, 0x8d, 0xfc, 0x94, 0x84, 0x9c, - 0xa3, 0xe0, 0x43, 0x54, 0x0e, 0x42, 0x17, 0x9a, 0xd7, 0x96, 0x9e, 0xf8, 0x05, 0xef, 0xa0, 0x8a, - 0xb1, 0xd7, 0xb3, 0xfb, 0x4e, 0xae, 0x97, 0x24, 0x91, 0xda, 0x3f, 0x6a, 0xe8, 0x66, 0x17, 0x82, - 0x29, 0x73, 0x80, 0xc0, 0x00, 0x02, 0xf0, 0x1c, 0xc0, 0x16, 0xaa, 0x7b, 0x74, 0x02, 0xc2, 0xa7, - 0x0e, 0x24, 0xed, 0x54, 0xb7, 0x37, 0x33, 0x6e, 0xfd, 0xc1, 0xcc, 0x40, 0x0a, 0x1f, 0xdc, 0x42, - 0x65, 0x75, 0x48, 0xea, 0xaa, 0x17, 0x79, 0x94, 0x2f, 0x49, 0x2c, 0xf8, 0x36, 0x2a, 0xfb, 0x54, - 0x8e, 0x9a, 0xa5, 0xc4, 0xa3, 0xa6, 0xac, 0x87, 0x54, 0x8e, 0x48, 0x82, 0xb6, 0xff, 0xd0, 0x90, - 0xfe, 0x84, 0xba, 0xac, 0xff, 0xbf, 0x9b, 0xde, 0x7f, 0x34, 0xd4, 0xbe, 0x5a, 0xd9, 0x7f, 0x30, - 0xbf, 0x93, 0xc5, 0xf9, 0xfd, 0x72, 0x75, 0x59, 0x57, 0x97, 0x7e, 0xc9, 0x04, 0xff, 0x52, 0x41, - 0xd5, 0xcc, 0x3d, 0xef, 0x0c, 0xed, 0xd2, 0xce, 0x38, 0x42, 0xeb, 0x8e, 0xcb, 0xc0, 0x93, 0x69, - 0xe8, 0xac, 0xb7, 0x3f, 0x79, 0xe9, 0xab, 0xdf, 0x9b, 0x0b, 0x62, 0xbf, 0x96, 0x25, 0x5a, 0x9f, - 0x47, 0xc9, 0x42, 0x22, 0x4c, 0x51, 0x45, 0x8d, 0x40, 0x3a, 0xfb, 0x8d, 0x9d, 0x8f, 0x5f, 0x6e, - 0x9a, 0x16, 0x47, 0xbb, 0xb8, 0x09, 0x65, 0x13, 0x24, 0x8d, 0x8c, 0x0f, 0xd0, 0xc6, 0x80, 0x32, - 0x37, 0x0c, 0xe0, 0x90, 0xbb, 0xcc, 0x39, 0xce, 0xb6, 0xc7, 0xdb, 0x71, 0x64, 0x6c, 0xdc, 0x9b, - 0x37, 0x9c, 0x45, 0xc6, 0xe6, 0x02, 0x90, 0x8c, 0xfe, 0x22, 0x19, 0x7f, 0x8f, 0x36, 0xf3, 0x91, - 0xeb, 0x82, 0x0b, 0x8e, 0xe4, 0x41, 0xb3, 0x92, 0x5c, 0xd7, 0xfb, 0x2b, 0x76, 0x0b, 0xed, 0x81, - 0x3b, 0xa3, 0xda, 0xaf, 0xc7, 0x91, 0xb1, 0xf9, 0xe0, 0x7c, 0x44, 0xb2, 0x9c, 0x04, 0x7f, 0x86, - 0x1a, 0x82, 0xf5, 0xe1, 0xf3, 0xc1, 0x00, 0x1c, 0x29, 0x9a, 0xaf, 0x24, 0x2a, 0xda, 0x6a, 0xbb, - 0x76, 0x0b, 0xf8, 0x2c, 0x32, 0x6e, 0x14, 0xc7, 0x3d, 0x97, 0x0a, 0x41, 0xe6, 0x69, 0xf8, 0x2e, - 0xba, 0xae, 0x7e, 0xe0, 0x3c, 0x94, 0x5d, 0x70, 0xb8, 0xd7, 0x17, 0xcd, 0x6a, 0x4b, 0xeb, 0x54, - 0x6c, 0x1c, 0x47, 0xc6, 0xf5, 0x47, 0x0b, 0x16, 0x72, 0xce, 0x13, 0x3f, 0x46, 0xb7, 0xf2, 0x37, - 0x21, 0x30, 0x65, 0x70, 0x94, 0xef, 0xfa, 0x5a, 0xb2, 0x47, 0xdf, 0x8a, 0x23, 0xe3, 0xd6, 0xee, - 0xc5, 0x2e, 0xe4, 0x32, 0x6e, 0xfb, 0x57, 0x0d, 0xbd, 0x7a, 0x41, 0xff, 0x60, 0x8a, 0xaa, 0x22, - 0xdd, 0x8a, 0xd9, 0x38, 0xde, 0x5d, 0xbd, 0x3b, 0xce, 0xaf, 0x53, 0xbb, 0x11, 0x47, 0x46, 0x75, - 0x86, 0xce, 0xe2, 0xe2, 0x0e, 0xaa, 0x39, 0xd4, 0x0e, 0xbd, 0x7e, 0xb6, 0xcf, 0xd7, 0xed, 0x75, - 0x35, 0xbe, 0x7b, 0xbb, 0x29, 0x46, 0x72, 0x2b, 0x7e, 0x13, 0x95, 0xc2, 0xc0, 0xcd, 0x56, 0x67, - 0x35, 0x8e, 0x8c, 0xd2, 0x63, 0x72, 0x40, 0x14, 0x66, 0xdf, 0x39, 0x39, 0xd5, 0xd7, 0x9e, 0x9d, - 0xea, 0x6b, 0xcf, 0x4f, 0xf5, 0xb5, 0x1f, 0x62, 0x5d, 0x3b, 0x89, 0x75, 0xed, 0x59, 0xac, 0x6b, - 0xcf, 0x63, 0x5d, 0xfb, 0x33, 0xd6, 0xb5, 0x9f, 0xfe, 0xd2, 0xd7, 0xbe, 0xae, 0x66, 0xa5, 0xfd, - 0x1b, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xc0, 0x7c, 0xc4, 0x6f, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index a5fa0f30..17de1a58 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ 
b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -28,9 +28,160 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhookk has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". 
+ // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. message MutatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -38,13 +189,13 @@ message MutatingWebhookConfiguration { // +optional // +patchMergeKey=name // +patchStrategy=merge - repeated Webhook Webhooks = 2; + repeated MutatingWebhook Webhooks = 2; } // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. message MutatingWebhookConfigurationList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -123,34 +274,16 @@ message ServiceReference { // this service. // +optional optional string path = 3; -} - -// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. -message ValidatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Webhooks is a list of webhooks and the affected resources and operations. + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated Webhook Webhooks = 2; + optional int32 port = 4; } -// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. -message ValidatingWebhookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ValidatingWebhookConfiguration. 
- repeated ValidatingWebhookConfiguration items = 2; -} - -// Webhook describes an admission webhook and the resources and operations it applies to. -message Webhook { +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -175,6 +308,23 @@ message Webhook { // +optional optional string failurePolicy = 4; + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on @@ -214,13 +364,27 @@ message Webhook { // } // // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // for more examples of label selectors. // // Default to the empty LabelSelector, which matches everything. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + // SideEffects states whether this webhookk has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be @@ -250,6 +414,31 @@ message Webhook { repeated string admissionReviewVersions = 8; } +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. 
Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { @@ -287,8 +476,6 @@ message WebhookClientConfig { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional optional ServiceReference service = 1; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index f7025f60..cf065a28 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -84,6 +84,16 @@ const ( Fail FailurePolicyType = "Fail" ) +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + type SideEffectClass string const ( @@ -105,16 +115,17 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. type ValidatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. 
// +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -123,7 +134,7 @@ type ValidatingWebhookConfiguration struct { type ValidatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingWebhookConfiguration. @@ -135,16 +146,17 @@ type ValidatingWebhookConfigurationList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. type MutatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -153,15 +165,147 @@ type MutatingWebhookConfiguration struct { type MutatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of MutatingWebhookConfiguration. Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` } -// Webhook describes an admission webhook and the resources and operations it applies to. -type Webhook struct { +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. 
+ // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. 
+ // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhookk has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -186,6 +330,23 @@ type Webhook struct { // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on @@ -232,6 +393,20 @@ type Webhook struct { // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + // SideEffects states whether this webhookk has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be @@ -259,8 +434,39 @@ type Webhook struct { // Default to `['v1beta1']`. // +optional AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. 
+ // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` } +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make // sure that all the tuple expansions are valid. type RuleWithOperations struct { @@ -322,8 +528,6 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` @@ -346,4 +550,10 @@ type ServiceReference struct { // this service. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index f400f9f4..f40a15d5 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -27,9 +27,28 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. 
Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + var map_MutatingWebhookConfiguration = map[string]string{ - "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -39,7 +58,7 @@ func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_MutatingWebhookConfigurationList = map[string]string{ "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of MutatingWebhookConfiguration.", } @@ -73,15 +92,34 @@ var map_ServiceReference = map[string]string{ "namespace": "`namespace` is the namespace of the service. Required", "name": "`name` is the name of the service. Required", "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. 
`port` should be a valid port number (1-65535, inclusive).", } func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. 
objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + var map_ValidatingWebhookConfiguration = map[string]string{ - "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -91,7 +129,7 @@ func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_ValidatingWebhookConfigurationList = map[string]string{ "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ValidatingWebhookConfiguration.", } @@ -99,26 +137,10 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } -var map_Webhook = map[string]string{ - "": "Webhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", - "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", - "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. 
API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", -} - -func (Webhook) SwaggerDoc() map[string]string { - return map_Webhook -} - var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index ab79ee6a..c4570d03 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,70 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { *out = *in @@ -32,7 +96,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfigu in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) + *out = make([]MutatingWebhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -62,7 +126,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]MutatingWebhookConfiguration, len(*in)) @@ -157,6 +221,11 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } @@ -170,6 +239,65 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { *out = *in @@ -177,7 +305,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookCon in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) + *out = make([]ValidatingWebhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -207,7 +335,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ValidatingWebhookConfiguration, len(*in)) @@ -236,55 +364,6 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Webhook) DeepCopyInto(out *Webhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - if in.AdmissionReviewVersions != nil { - in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. -func (in *Webhook) DeepCopy() *Webhook { - if in == nil { - return nil - } - out := new(Webhook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 5b29f432..425144d8 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -17,57 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -80,124 +48,790 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m 
*DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + 
if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_e1014cab6f31e43b, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func 
(*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) 
XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) 
XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_e1014cab6f31e43b, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_e1014cab6f31e43b, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) } -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{22} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{23} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{24} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{26} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptor_e1014cab6f31e43b, []int{27} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) } +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") @@ -228,10 +862,146 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b) +} + +var fileDescriptor_e1014cab6f31e43b = []byte{ + // 2031 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, + 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, + 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, + 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55, + 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f, + 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d, + 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36, + 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a, + 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7, + 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35, + 0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 
0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02, + 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37, + 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d, + 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c, + 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7, + 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11, + 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e, + 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e, + 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19, + 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16, + 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8, + 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa, + 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd, + 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc, + 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1, + 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1, + 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3, + 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6, + 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d, + 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7, + 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 0x61, 0xa1, + 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92, + 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39, + 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x28, 0xa2, + 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57, + 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34, + 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b, + 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9, + 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a, + 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95, + 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68, + 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f, + 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13, + 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda, + 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f, + 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6, + 0x2a, 0x92, 0xf4, 0x23, 0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 
0x73, 0x1a, 0xc0, 0x07, 0x60, + 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf, + 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe, + 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39, + 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e, + 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22, + 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b, + 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5, + 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5, + 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9, + 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, + 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, + 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, + 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0, + 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa, + 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6, + 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 0xe3, 0x93, + 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99, + 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96, + 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7, + 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef, + 0x82, 0x25, 0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed, + 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2, + 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e, + 0x49, 0x66, 0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97, + 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93, + 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f, + 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d, + 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80, + 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40, + 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e, + 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25, + 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7, + 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9, + 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92, + 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b, + 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 
0x4f, + 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f, + 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73, + 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2, + 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31, + 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89, + 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae, + 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26, + 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e, + 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab, + 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2, + 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5, + 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb, + 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a, + 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49, + 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20, + 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54, + 0x8f, 0xcc, 0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd, + 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39, + 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60, + 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5, + 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91, + 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39, + 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8, + 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24, + 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a, + 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6, + 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b, + 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb, + 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed, + 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf, + 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74, + 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11, + 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c, + 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36, + 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce, + 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 0x22, 0x77, 0x27, 0x5e, + 0x88, 0x48, 0x5c, 
0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36, + 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d, + 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0, + 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -239,36 +1009,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -276,37 +1055,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -314,41 +1102,52 
@@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -356,41 +1155,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { 
return nil, err } @@ -398,37 +1208,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -436,51 +1255,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -488,58 +1318,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -547,31 +1384,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, 
err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -579,41 +1424,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -621,49 +1477,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := 
m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -671,37 +1540,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,69 +1587,80 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 
+ { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -779,52 +1668,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -832,31 +1728,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -864,41 +1768,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -906,41 +1821,52 @@ func (m 
*ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -948,37 +1874,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -986,43 +1921,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1030,44 +1974,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + _ = l if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1075,27 +2026,34 @@ func (m 
*RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1103,37 +2061,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1141,22 +2108,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1164,41 +2136,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1206,41 +2189,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1248,37 +2242,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n37, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1286,73 +2289,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n38, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n38 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n39, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n39 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + 
dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1360,57 +2378,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m 
*StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1418,37 +2445,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n41 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1460,6 +2500,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1474,6 +2517,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1486,6 +2532,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1502,6 +2551,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1516,6 +2568,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1534,6 +2589,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1557,6 +2615,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1569,6 +2630,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1581,6 +2645,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1599,6 +2666,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1613,6 +2683,9 @@ func (m *DeploymentList) Size() (n int) { } func (m 
*DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1638,6 +2711,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1659,6 +2735,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1671,6 +2750,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1683,6 +2765,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1699,6 +2784,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1713,6 +2801,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1729,6 +2820,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1746,6 +2840,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1756,6 +2853,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1770,6 +2870,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1779,6 +2882,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1791,6 +2897,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1807,6 +2916,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1821,6 +2933,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1851,6 +2966,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1875,6 +2993,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1887,14 +3008,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1904,8 +3018,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := 
strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1915,9 +3029,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1927,7 +3046,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1941,7 +3060,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1952,9 +3071,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1964,8 +3088,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -1977,6 +3101,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -1987,7 +3116,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1998,7 +3127,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2008,7 +3137,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2024,8 +3153,8 @@ func (this *DeploymentCondition) String() string 
{ `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2034,9 +3163,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2047,8 +3181,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2062,13 +3196,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), 
"DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2081,7 +3220,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2091,7 +3230,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2105,7 +3244,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2116,9 +3255,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2129,8 +3273,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2140,13 +3284,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2156,7 +3305,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2166,8 +3315,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2187,7 +3336,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2201,7 +3350,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", 
"v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2212,9 +3361,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2223,11 +3377,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2240,6 +3399,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2249,7 +3413,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", 
"StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2260,7 +3424,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2288,7 +3452,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2316,7 +3480,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2325,6 +3489,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2346,7 +3513,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2355,6 +3522,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2376,7 +3546,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2390,6 +3560,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2417,7 +3590,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2445,7 +3618,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2454,6 +3627,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2475,7 +3651,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2484,6 +3660,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2501,6 +3680,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3710,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2556,7 +3738,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2565,6 +3747,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2586,7 +3771,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2595,6 +3780,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2616,7 +3804,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2625,6 +3813,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2641,6 +3832,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2668,7 +3862,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2696,7 +3890,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2706,6 +3900,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2725,7 +3922,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2735,6 +3932,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2754,7 +3954,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2763,6 +3963,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2784,7 +3987,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2794,6 +3997,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2813,7 +4019,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2823,6 +4029,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2837,6 +4046,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2864,7 +4076,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2892,7 +4104,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2901,6 +4113,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2922,7 +4137,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2931,6 +4146,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2948,6 +4166,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2975,7 +4196,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3003,7 +4224,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3012,11 +4233,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3036,7 +4260,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3045,6 +4269,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3066,7 +4293,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3075,6 +4302,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3096,7 +4326,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3115,7 +4345,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3130,6 +4360,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3157,7 +4390,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3185,7 +4418,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3204,7 +4437,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3223,7 +4456,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3242,7 +4475,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3261,7 +4494,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3280,7 +4513,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3299,7 +4532,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3318,7 +4551,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3337,7 +4570,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3357,7 +4590,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3366,6 +4599,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3383,6 +4619,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3410,7 +4649,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3438,7 +4677,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3448,6 +4687,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3467,7 +4709,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3476,6 +4718,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3495,6 +4740,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3522,7 +4770,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3550,7 +4798,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3559,6 +4807,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3580,7 +4831,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3589,6 +4840,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3610,7 +4864,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3619,6 +4873,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3635,6 
+4892,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3662,7 +4922,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3690,7 +4950,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3700,6 +4960,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3719,7 +4982,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3729,6 +4992,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3748,7 +5014,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3758,6 +5024,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3777,7 +5046,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3787,6 +5056,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3806,7 +5078,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3815,6 +5087,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3836,7 +5111,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3845,6 +5120,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3861,6 +5139,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3888,7 +5169,7 @@ func (m *DeploymentList) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3916,7 +5197,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3925,6 +5206,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3946,7 +5230,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3955,6 +5239,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3972,6 +5259,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3999,7 +5289,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4027,7 +5317,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4047,7 +5337,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4056,11 +5346,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4080,7 +5373,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4089,6 +5382,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4110,7 +5406,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4119,6 +5415,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4140,7 +5439,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4159,7 +5458,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4179,7 +5478,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4199,7 +5498,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4214,6 +5513,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4241,7 +5543,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4269,7 +5571,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4288,7 +5590,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4307,7 +5609,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4326,7 +5628,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4345,7 +5647,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4364,7 +5666,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4373,6 +5675,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4395,7 +5700,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4414,7 +5719,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4429,6 +5734,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4456,7 +5764,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4484,7 +5792,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -4494,6 +5802,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4513,7 +5824,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4522,6 +5833,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4541,6 +5855,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4568,7 +5885,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4596,7 +5913,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4605,6 +5922,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4626,7 +5946,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4635,6 +5955,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4656,7 +5979,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4665,6 +5988,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4681,6 +6007,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4708,7 +6037,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4736,7 +6065,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4746,6 +6075,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4765,7 +6097,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4775,6 +6107,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4794,7 +6129,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4803,6 +6138,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4824,7 +6162,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4834,6 +6172,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4853,7 +6194,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4863,6 +6204,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4877,6 +6221,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4904,7 +6251,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4932,7 +6279,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4941,6 +6288,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4962,7 +6312,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4971,6 +6321,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4988,6 +6341,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5015,7 +6371,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } 
@@ -5043,7 +6399,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5063,7 +6419,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5072,11 +6428,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5096,7 +6455,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5105,6 +6464,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5126,7 +6488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5140,6 +6502,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5167,7 +6532,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5195,7 +6560,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5214,7 +6579,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5233,7 +6598,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5252,7 +6617,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5271,7 +6636,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5290,7 +6655,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5299,6 +6664,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-5316,6 +6684,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5343,7 +6714,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5371,7 +6742,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5380,11 +6751,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5399,6 +6773,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5426,7 +6803,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5454,7 +6831,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5463,11 +6840,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5487,7 +6867,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5496,11 +6876,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5515,6 +6898,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5542,7 +6928,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5570,7 +6956,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5585,6 +6971,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5612,7 +7001,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5640,7 +7029,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5649,6 +7038,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5670,7 +7062,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5679,6 +7071,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5700,7 +7095,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5709,6 +7104,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5725,6 +7123,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5752,7 +7153,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5780,7 +7181,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5790,6 +7191,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5809,7 +7213,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5819,6 +7223,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5838,7 +7245,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5847,6 +7254,9 @@ func (m 
*StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5868,7 +7278,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5878,6 +7288,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5897,7 +7310,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5907,6 +7320,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5921,6 +7337,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5948,7 +7367,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5976,7 +7395,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5985,6 +7404,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6006,7 +7428,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6015,6 +7437,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6032,6 +7457,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6059,7 +7487,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6087,7 +7515,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6107,7 +7535,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6116,11 +7544,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6140,7 +7571,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6149,6 +7580,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6170,7 +7604,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6179,10 +7613,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6201,7 +7638,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6211,6 +7648,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6230,7 +7670,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6240,6 +7680,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6259,7 +7702,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6268,6 +7711,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6289,7 +7735,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6304,6 +7750,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6331,7 +7780,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6359,7 +7808,7 @@ func (m 
*StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6378,7 +7827,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6397,7 +7846,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6416,7 +7865,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6435,7 +7884,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6454,7 +7903,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6464,6 +7913,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6483,7 +7935,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6493,6 +7945,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6512,7 +7967,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6532,7 +7987,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6541,6 +7996,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6558,6 +8016,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6585,7 +8046,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6613,7 +8074,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6623,6 +8084,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6642,7 +8106,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6651,6 +8115,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6670,6 +8137,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6736,10 +8206,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -6768,6 +8241,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -6786,139 +8262,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2037 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, - 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, - 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x75, 0x50, 0x94, 0x9b, 0xa7, 0xea, 0xfd, 0x5e, 0xff, 0xaa, - 0xea, 0x57, 0xf5, 0x5e, 0x57, 0x1b, 0xdc, 0x3b, 0xfe, 0xb6, 0xa7, 0xea, 0x76, 0xfb, 0xd8, 0x3f, - 0x24, 0xae, 0x45, 0x28, 0xf1, 0xda, 0x43, 0x62, 0x69, 0xb6, 0xdb, 0x16, 0x1d, 0xd8, 0xd1, 0xdb, - 0xd8, 0x71, 0xbc, 0xf6, 0xf0, 0x76, 0x7b, 0x40, 0x2c, 0xe2, 0x62, 0x4a, 0x34, 0xd5, 0x71, 0x6d, - 0x6a, 0x43, 0x18, 0x60, 0x54, 0xec, 0xe8, 0x2a, 0xc3, 0xa8, 0xc3, 0xdb, 0x57, 0x6f, 0x0d, 0x74, - 0x7a, 0xe4, 0x1f, 0xaa, 0x7d, 0xdb, 0x6c, 0x0f, 0xec, 0x81, 0xdd, 0xe6, 0xd0, 0x43, 0xff, 0x11, - 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x02, 0x8a, 0xab, 0xad, 0xc4, 0x63, 0xfa, 0xb6, 0x4b, 0x24, 0x8f, - 0xb9, 0x7a, 0x37, 0xc6, 0x98, 0xb8, 0x7f, 0xa4, 0x5b, 0xc4, 0x1d, 0xb5, 0x9d, 0xe3, 0x01, 0x6b, - 0xf0, 0xda, 0x26, 0xa1, 0x58, 0x16, 0xd5, 0x2e, 0x8a, 0x72, 0x7d, 0x8b, 0xea, 0x26, 0xc9, 0x05, - 0xbc, 0x31, 0x29, 0xc0, 0xeb, 0x1f, 0x11, 0x13, 0xe7, 0xe2, 0x5e, 0x2b, 0x8a, 0xf3, 0xa9, 0x6e, - 0xb4, 0x75, 0x8b, 0x7a, 0xd4, 0xcd, 0x06, 0xb5, 0xfe, 0xa3, 0x00, 0xd8, 0xb5, 0x2d, 0xea, 0xda, - 0x86, 0x41, 0x5c, 0x44, 0x86, 0xba, 0xa7, 0xdb, 0x16, 0xfc, 0x29, 0xa8, 0xb1, 0xf1, 0x68, 0x98, - 0xe2, 0xba, 0x72, 0x5d, 0xd9, 0x58, 0xb8, 0xf3, 0x2d, 0x35, 0x9e, 0xe4, 0x88, 0x5e, 0x75, 0x8e, - 0x07, 
0xac, 0xc1, 0x53, 0x19, 0x5a, 0x1d, 0xde, 0x56, 0x77, 0x0f, 0xdf, 0x27, 0x7d, 0xda, 0x23, - 0x14, 0x77, 0xe0, 0x93, 0x93, 0xe6, 0xcc, 0xf8, 0xa4, 0x09, 0xe2, 0x36, 0x14, 0xb1, 0xc2, 0x5d, - 0x50, 0xe1, 0xec, 0x25, 0xce, 0x7e, 0xab, 0x90, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3d, - 0xa6, 0xc4, 0x62, 0xe9, 0x75, 0x2e, 0x09, 0xea, 0xca, 0x16, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x2a, - 0xa8, 0xb9, 0x22, 0xfd, 0x7a, 0xf9, 0xba, 0xb2, 0x51, 0xee, 0x5c, 0x16, 0xa8, 0x5a, 0x38, 0x2c, - 0x14, 0x21, 0x5a, 0x7f, 0x55, 0xc0, 0x5a, 0x7e, 0xdc, 0xdb, 0xba, 0x47, 0xe1, 0x7b, 0xb9, 0xb1, - 0xab, 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x47, 0x0f, 0x0e, 0x5b, 0x12, 0xe3, 0x7e, 0x17, 0x54, - 0x75, 0x4a, 0x4c, 0xaf, 0x5e, 0xba, 0x5e, 0xde, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0xd7, 0xae, 0x9a, - 0x4f, 0xac, 0xb3, 0x28, 0x28, 0xab, 0xef, 0xb0, 0x60, 0x14, 0x70, 0xb4, 0xfe, 0xab, 0x80, 0xf9, - 0x2d, 0x4c, 0x4c, 0xdb, 0xda, 0x27, 0xf4, 0x02, 0x16, 0xad, 0x0b, 0x2a, 0x9e, 0x43, 0xfa, 0x62, - 0xd1, 0xbe, 0x26, 0xcb, 0x3d, 0x4a, 0x67, 0xdf, 0x21, 0xfd, 0x78, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, - 0xe1, 0xbb, 0x60, 0xd6, 0xa3, 0x98, 0xfa, 0x1e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, - 0x43, 0x3b, 0x4b, 0x82, 0x68, 0x36, 0xf8, 0x8d, 0x04, 0x45, 0xeb, 0x5f, 0x25, 0x00, 0x23, 0x6c, - 0xd7, 0xb6, 0x34, 0x9d, 0xb2, 0xfa, 0x7d, 0x13, 0x54, 0xe8, 0xc8, 0x21, 0x7c, 0x1a, 0xe6, 0x3b, - 0x37, 0xc2, 0x2c, 0xee, 0x8f, 0x1c, 0xf2, 0xe9, 0x49, 0x73, 0x2d, 0x1f, 0xc1, 0x7a, 0x10, 0x8f, - 0x81, 0xdb, 0x51, 0x7e, 0x25, 0x1e, 0x7d, 0x37, 0xfd, 0xe8, 0x4f, 0x4f, 0x9a, 0x92, 0xc3, 0x42, - 0x8d, 0x98, 0xd2, 0x09, 0xc2, 0x21, 0x80, 0x06, 0xf6, 0xe8, 0x7d, 0x17, 0x5b, 0x5e, 0xf0, 0x24, - 0xdd, 0x24, 0x62, 0xe4, 0xaf, 0x4c, 0xb7, 0x3c, 0x2c, 0xa2, 0x73, 0x55, 0x64, 0x01, 0xb7, 0x73, - 0x6c, 0x48, 0xf2, 0x04, 0x78, 0x03, 0xcc, 0xba, 0x04, 0x7b, 0xb6, 0x55, 0xaf, 0xf0, 0x51, 0x44, - 0x13, 0x88, 0x78, 0x2b, 0x12, 0xbd, 0xf0, 0x65, 0x30, 0x67, 0x12, 0xcf, 0xc3, 0x03, 0x52, 0xaf, - 0x72, 0xe0, 0xb2, 0x00, 0xce, 0xf5, 0x82, 0x66, 0x14, 0xf6, 0xb7, 0x7e, 0xaf, 0x80, 0xc5, 0x68, - 0xe6, 0x2e, 0x60, 0xab, 0x74, 0xd2, 0x5b, 0xe5, 0xc5, 0x53, 0xeb, 0xa4, 0x60, 0x87, 0x7c, 0x50, - 0x4e, 0xe4, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0xd4, 0x3c, 0x62, 0x90, 0x3e, 0xb5, 0x5d, 0x91, 0xf3, - 0x6b, 0x53, 0xe6, 0x8c, 0x0f, 0x89, 0xb1, 0x2f, 0x42, 0x3b, 0x97, 0x58, 0xd2, 0xe1, 0x2f, 0x14, - 0x51, 0xc2, 0x1f, 0x81, 0x1a, 0x25, 0xa6, 0x63, 0x60, 0x4a, 0xc4, 0x36, 0x49, 0xd5, 0x37, 0x2b, - 0x17, 0x46, 0xb6, 0x67, 0x6b, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x68, 0x1e, 0xc2, 0x56, 0x14, 0xd1, - 0xc0, 0x63, 0xb0, 0xe4, 0x3b, 0x1a, 0x43, 0x52, 0x76, 0x74, 0x0f, 0x46, 0xa2, 0x7c, 0x6e, 0x9e, - 0x3a, 0x21, 0x07, 0xa9, 0x90, 0xce, 0x9a, 0x78, 0xc0, 0x52, 0xba, 0x1d, 0x65, 0xa8, 0xe1, 0x26, - 0x58, 0x36, 0x75, 0x0b, 0x11, 0xac, 0x8d, 0xf6, 0x49, 0xdf, 0xb6, 0x34, 0x8f, 0x17, 0x50, 0xb5, - 0xb3, 0x2e, 0x08, 0x96, 0x7b, 0xe9, 0x6e, 0x94, 0xc5, 0xc3, 0x6d, 0xb0, 0x1a, 0x9e, 0xb3, 0x3f, - 0xd0, 0x3d, 0x6a, 0xbb, 0xa3, 0x6d, 0xdd, 0xd4, 0x69, 0x7d, 0x96, 0xf3, 0xd4, 0xc7, 0x27, 0xcd, - 0x55, 0x24, 0xe9, 0x47, 0xd2, 0xa8, 0xd6, 0x6f, 0x66, 0xc1, 0x72, 0xe6, 0x34, 0x80, 0x0f, 0xc0, - 0x5a, 0xdf, 0x77, 0x5d, 0x62, 0xd1, 0x1d, 0xdf, 0x3c, 0x24, 0xee, 0x7e, 0xff, 0x88, 0x68, 0xbe, - 0x41, 0x34, 0xbe, 0xa2, 0xd5, 0x4e, 0x43, 0xe4, 0xba, 0xd6, 0x95, 0xa2, 0x50, 0x41, 0x34, 0xfc, - 0x21, 0x80, 0x16, 0x6f, 0xea, 0xe9, 0x9e, 0x17, 0x71, 0x96, 0x38, 0x67, 0xb4, 0x01, 0x77, 0x72, - 0x08, 0x24, 0x89, 0x62, 0x39, 0x6a, 0xc4, 0xd3, 0x5d, 0xa2, 0x65, 0x73, 0x2c, 0xa7, 0x73, 0xdc, - 0x92, 0xa2, 0x50, 0x41, 0x34, 
0x7c, 0x1d, 0x2c, 0x04, 0x4f, 0xe3, 0x73, 0x2e, 0x16, 0x67, 0x45, - 0x90, 0x2d, 0xec, 0xc4, 0x5d, 0x28, 0x89, 0x63, 0x43, 0xb3, 0x0f, 0x3d, 0xe2, 0x0e, 0x89, 0xf6, - 0x76, 0xe0, 0x01, 0x98, 0x50, 0x56, 0xb9, 0x50, 0x46, 0x43, 0xdb, 0xcd, 0x21, 0x90, 0x24, 0x8a, - 0x0d, 0x2d, 0xa8, 0x9a, 0xdc, 0xd0, 0x66, 0xd3, 0x43, 0x3b, 0x90, 0xa2, 0x50, 0x41, 0x34, 0xab, - 0xbd, 0x20, 0xe5, 0xcd, 0x21, 0xd6, 0x0d, 0x7c, 0x68, 0x90, 0xfa, 0x5c, 0xba, 0xf6, 0x76, 0xd2, - 0xdd, 0x28, 0x8b, 0x87, 0x6f, 0x83, 0x2b, 0x41, 0xd3, 0x81, 0x85, 0x23, 0x92, 0x1a, 0x27, 0x79, - 0x41, 0x90, 0x5c, 0xd9, 0xc9, 0x02, 0x50, 0x3e, 0x06, 0xbe, 0x09, 0x96, 0xfa, 0xb6, 0x61, 0xf0, - 0x7a, 0xec, 0xda, 0xbe, 0x45, 0xeb, 0xf3, 0x9c, 0x05, 0xb2, 0x3d, 0xd4, 0x4d, 0xf5, 0xa0, 0x0c, - 0x12, 0x3e, 0x04, 0xa0, 0x1f, 0xca, 0x81, 0x57, 0x07, 0xc5, 0x42, 0x9f, 0xd7, 0xa1, 0x58, 0x80, - 0xa3, 0x26, 0x0f, 0x25, 0xd8, 0x5a, 0x1f, 0x28, 0x60, 0xbd, 0x60, 0x8f, 0xc3, 0xef, 0xa5, 0x54, - 0xef, 0x66, 0x46, 0xf5, 0xae, 0x15, 0x84, 0x25, 0xa4, 0xaf, 0x0f, 0x16, 0x99, 0xef, 0xd0, 0xad, - 0x41, 0x00, 0x11, 0x27, 0xd8, 0x2b, 0xb2, 0xdc, 0x51, 0x12, 0x18, 0x1f, 0xc3, 0x57, 0xc6, 0x27, - 0xcd, 0xc5, 0x54, 0x1f, 0x4a, 0x73, 0xb6, 0x7e, 0x59, 0x02, 0x60, 0x8b, 0x38, 0x86, 0x3d, 0x32, - 0x89, 0x75, 0x11, 0xae, 0x65, 0x2b, 0xe5, 0x5a, 0x5a, 0xd2, 0x85, 0x88, 0xf2, 0x29, 0xb4, 0x2d, - 0xdb, 0x19, 0xdb, 0xf2, 0x8d, 0x09, 0x3c, 0xa7, 0xfb, 0x96, 0x7f, 0x94, 0xc1, 0x4a, 0x0c, 0x8e, - 0x8d, 0xcb, 0xbd, 0xd4, 0x12, 0xbe, 0x94, 0x59, 0xc2, 0x75, 0x49, 0xc8, 0x73, 0x73, 0x2e, 0x9f, - 0xbd, 0x83, 0x80, 0xef, 0x83, 0x25, 0x66, 0x55, 0x82, 0x42, 0xe0, 0x46, 0x68, 0xf6, 0xcc, 0x46, - 0x28, 0x12, 0xb2, 0xed, 0x14, 0x13, 0xca, 0x30, 0x17, 0x18, 0xaf, 0xb9, 0xe7, 0x6d, 0xbc, 0x5a, - 0x7f, 0x50, 0xc0, 0x52, 0xbc, 0x4c, 0x17, 0x60, 0x93, 0xba, 0x69, 0x9b, 0xd4, 0x38, 0xbd, 0x2e, - 0x0b, 0x7c, 0xd2, 0xdf, 0x2b, 0xc9, 0xac, 0xb9, 0x51, 0xda, 0x60, 0x2f, 0x54, 0x8e, 0xa1, 0xf7, - 0xb1, 0x27, 0x64, 0xf5, 0x52, 0xf0, 0x32, 0x15, 0xb4, 0xa1, 0xa8, 0x37, 0x65, 0xa9, 0x4a, 0xcf, - 0xd7, 0x52, 0x95, 0x3f, 0x1b, 0x4b, 0x75, 0x1f, 0xd4, 0xbc, 0xd0, 0x4c, 0x55, 0x38, 0xe5, 0x8d, - 0x49, 0xdb, 0x59, 0xf8, 0xa8, 0x88, 0x35, 0x72, 0x50, 0x11, 0x93, 0xcc, 0x3b, 0x55, 0x3f, 0x4f, - 0xef, 0xc4, 0xb6, 0xb0, 0x83, 0x7d, 0x8f, 0x68, 0xbc, 0xee, 0x6b, 0xf1, 0x16, 0xde, 0xe3, 0xad, - 0x48, 0xf4, 0xc2, 0x03, 0xb0, 0xee, 0xb8, 0xf6, 0xc0, 0x25, 0x9e, 0xb7, 0x45, 0xb0, 0x66, 0xe8, - 0x16, 0x09, 0x07, 0x10, 0xa8, 0xde, 0xb5, 0xf1, 0x49, 0x73, 0x7d, 0x4f, 0x0e, 0x41, 0x45, 0xb1, - 0xad, 0x3f, 0x57, 0xc0, 0xe5, 0xec, 0x89, 0x58, 0x60, 0x44, 0x94, 0x73, 0x19, 0x91, 0x57, 0x13, - 0x25, 0x1a, 0xb8, 0xb4, 0xc4, 0x3b, 0x7f, 0xae, 0x4c, 0x37, 0xc1, 0xb2, 0x30, 0x1e, 0x61, 0xa7, - 0xb0, 0x62, 0xd1, 0xf2, 0x1c, 0xa4, 0xbb, 0x51, 0x16, 0xcf, 0xec, 0x45, 0xec, 0x1a, 0x42, 0x92, - 0x4a, 0xda, 0x5e, 0x6c, 0x66, 0x01, 0x28, 0x1f, 0x03, 0x7b, 0x60, 0xc5, 0xb7, 0xf2, 0x54, 0x41, - 0xb9, 0x5c, 0x13, 0x54, 0x2b, 0x07, 0x79, 0x08, 0x92, 0xc5, 0xc1, 0x1f, 0xa7, 0x1c, 0xc7, 0x2c, - 0x3f, 0x08, 0x5e, 0x3a, 0xbd, 0xa2, 0xa7, 0xb6, 0x1c, 0xf0, 0x1e, 0x58, 0x74, 0xb9, 0xa1, 0x0c, - 0xb3, 0x0c, 0x4c, 0xd9, 0x57, 0x44, 0xd8, 0x22, 0x4a, 0x76, 0xa2, 0x34, 0x56, 0xe2, 0xa3, 0x6a, - 0xd3, 0xfa, 0xa8, 0xd6, 0x9f, 0x14, 0x00, 0xf3, 0x5b, 0x70, 0xe2, 0xcb, 0x7d, 0x2e, 0x22, 0x21, - 0x91, 0x9a, 0xdc, 0xe1, 0xdc, 0x9c, 0xec, 0x70, 0xe2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, - 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x71, 0x3e, 0xcf, 0x66, 0x71, 0x12, 0x3c, 0xa7, 0x5b, 0x9c, - 0x7f, 0x97, 0xc0, 0x4a, 0x0c, 0x9e, 0xda, 0xe2, 0x48, 
0x42, 0xbe, 0xbc, 0x9c, 0x99, 0x7c, 0x39, - 0xc3, 0x6c, 0x47, 0x3c, 0x75, 0xff, 0x27, 0xb6, 0x23, 0x4e, 0xa8, 0xc0, 0x76, 0xfc, 0xae, 0x94, - 0xcc, 0xfa, 0x0b, 0x6f, 0x3b, 0x9e, 0xfd, 0x72, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, - 0x29, 0x1d, 0x54, 0x26, 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x4f, 0x43, 0x42, - 0x0c, 0x03, 0x05, 0xfd, 0xaa, 0x88, 0x5c, 0xfd, 0xbe, 0x04, 0x83, 0xa4, 0x91, 0x05, 0x9a, 0x5e, - 0x3e, 0x97, 0xa6, 0xe7, 0xd4, 0xa6, 0x72, 0x06, 0xb5, 0x91, 0xea, 0x73, 0xf5, 0x1c, 0xfa, 0x3c, - 0xb5, 0xa0, 0x4a, 0x8e, 0xab, 0x89, 0xef, 0xf0, 0xbf, 0x56, 0xc0, 0x9a, 0xfc, 0xf5, 0x19, 0x1a, - 0x60, 0xc9, 0xc4, 0x8f, 0x93, 0x97, 0x17, 0x93, 0x04, 0xc3, 0xa7, 0xba, 0xa1, 0x06, 0x5f, 0x77, - 0xd4, 0x77, 0x2c, 0xba, 0xeb, 0xee, 0x53, 0x57, 0xb7, 0x06, 0x81, 0xc0, 0xf6, 0x52, 0x5c, 0x28, - 0xc3, 0xdd, 0xfa, 0x58, 0x01, 0xeb, 0x05, 0x2a, 0x77, 0xb1, 0x99, 0xc0, 0x87, 0xa0, 0x66, 0xe2, - 0xc7, 0xfb, 0xbe, 0x3b, 0x08, 0x25, 0xf9, 0xec, 0xcf, 0xe1, 0x1b, 0xb9, 0x27, 0x58, 0x50, 0xc4, - 0xd7, 0xda, 0x05, 0xd7, 0x53, 0x83, 0x64, 0x9b, 0x86, 0x3c, 0xf2, 0x0d, 0xbe, 0x7f, 0x84, 0xa7, - 0xb8, 0x09, 0xe6, 0x1d, 0xec, 0x52, 0x3d, 0x32, 0xa3, 0xd5, 0xce, 0xe2, 0xf8, 0xa4, 0x39, 0xbf, - 0x17, 0x36, 0xa2, 0xb8, 0xbf, 0xf5, 0xab, 0x12, 0x58, 0x48, 0x90, 0x5c, 0x80, 0xbe, 0xbf, 0x95, - 0xd2, 0x77, 0xe9, 0x17, 0x93, 0xe4, 0xa8, 0x8a, 0x04, 0xbe, 0x97, 0x11, 0xf8, 0x6f, 0x4e, 0x22, - 0x3a, 0x5d, 0xe1, 0x3f, 0x29, 0x81, 0xd5, 0x04, 0x3a, 0x96, 0xf8, 0xef, 0xa4, 0x24, 0x7e, 0x23, - 0x23, 0xf1, 0x75, 0x59, 0xcc, 0x97, 0x1a, 0x3f, 0x59, 0xe3, 0xff, 0xa8, 0x80, 0xe5, 0xc4, 0xdc, - 0x5d, 0x80, 0xc8, 0x6f, 0xa5, 0x45, 0xbe, 0x39, 0xa1, 0x5e, 0x0a, 0x54, 0xfe, 0x49, 0x35, 0x95, - 0xf7, 0x17, 0x5e, 0xe6, 0x7f, 0x0e, 0x56, 0x87, 0xb6, 0xe1, 0x9b, 0xa4, 0x6b, 0x60, 0xdd, 0x0c, - 0x01, 0x4c, 0xc9, 0xd8, 0x24, 0xbe, 0x2c, 0xa5, 0x27, 0xae, 0xa7, 0x7b, 0x94, 0x58, 0xf4, 0x41, - 0x1c, 0x19, 0x6b, 0xf1, 0x03, 0x09, 0x1d, 0x92, 0x3e, 0x04, 0xbe, 0x0e, 0x16, 0x98, 0xa6, 0xea, - 0x7d, 0xb2, 0x83, 0xcd, 0xb0, 0xa6, 0xa2, 0xef, 0x03, 0xfb, 0x71, 0x17, 0x4a, 0xe2, 0xe0, 0x11, - 0x58, 0x71, 0x6c, 0xad, 0x87, 0x2d, 0x3c, 0x20, 0xec, 0xfc, 0xdf, 0xb3, 0x0d, 0xbd, 0x3f, 0xe2, - 0xf7, 0x0e, 0xf3, 0x9d, 0x37, 0xc2, 0x17, 0xd2, 0xbd, 0x3c, 0x84, 0x79, 0x76, 0x49, 0x33, 0xdf, - 0xcf, 0x32, 0x4a, 0x68, 0xe6, 0x3e, 0x67, 0xcd, 0xe5, 0xfe, 0x07, 0x40, 0x56, 0x5c, 0xe7, 0xfc, - 0xa0, 0x55, 0x74, 0xa3, 0x52, 0x3b, 0xd7, 0xd7, 0xa8, 0x4f, 0x2a, 0xe0, 0x4a, 0xee, 0x80, 0xfc, - 0x1c, 0xef, 0x34, 0x72, 0x6e, 0xa9, 0x7c, 0x06, 0xb7, 0xb4, 0x09, 0x96, 0xc5, 0x87, 0xb0, 0x8c, - 0xd9, 0x8a, 0xec, 0x68, 0x37, 0xdd, 0x8d, 0xb2, 0x78, 0xd9, 0x9d, 0x4a, 0xf5, 0x8c, 0x77, 0x2a, - 0xc9, 0x2c, 0xc4, 0xff, 0x6f, 0x04, 0x55, 0x97, 0xcf, 0x42, 0xfc, 0x1b, 0x47, 0x16, 0x0f, 0xbf, - 0x1b, 0x96, 0x54, 0xc4, 0x30, 0xc7, 0x19, 0x32, 0x35, 0x12, 0x11, 0x64, 0xd0, 0xcf, 0xf4, 0xb1, - 0xe7, 0x3d, 0xc9, 0xc7, 0x9e, 0x8d, 0x09, 0xa5, 0x3c, 0xbd, 0x55, 0xfc, 0x9b, 0x02, 0x5e, 0x28, - 0xdc, 0x03, 0x70, 0x33, 0xa5, 0xb3, 0xb7, 0x32, 0x3a, 0xfb, 0x62, 0x61, 0x60, 0x42, 0x6c, 0x4d, - 0xf9, 0x85, 0xc8, 0xdd, 0x89, 0x17, 0x22, 0x12, 0x17, 0x35, 0xf9, 0x66, 0xa4, 0xb3, 0xf1, 0xe4, - 0x69, 0x63, 0xe6, 0xc3, 0xa7, 0x8d, 0x99, 0x8f, 0x9e, 0x36, 0x66, 0x7e, 0x31, 0x6e, 0x28, 0x4f, - 0xc6, 0x0d, 0xe5, 0xc3, 0x71, 0x43, 0xf9, 0x68, 0xdc, 0x50, 0xfe, 0x39, 0x6e, 0x28, 0xbf, 0xfd, - 0xb8, 0x31, 0xf3, 0xb0, 0x34, 0xbc, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x6b, 0x01, - 0x7b, 0x12, 0x26, 0x00, 0x00, -} diff --git 
a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
index fea81922..6c552797 100644
--- a/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -41,7 +41,7 @@ option go_package = "v1";
 // depend on its stability. It is primarily for internal use by controllers.
 message ControllerRevision {
   // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

@@ -54,7 +54,7 @@ message ControllerRevision {

 // ControllerRevisionList is a resource containing a list of ControllerRevision objects.
 message ControllerRevisionList {
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

@@ -65,12 +65,12 @@ message ControllerRevisionList {
 // DaemonSet represents the configuration of a daemon set.
 message DaemonSet {
   // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

   // The desired behavior of this daemon set.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   optional DaemonSetSpec spec = 2;

@@ -78,7 +78,7 @@ message DaemonSet {
   // out of date by some window of time.
   // Populated by the system.
   // Read-only.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   optional DaemonSetStatus status = 3;
 }
@@ -107,7 +107,7 @@ message DaemonSetCondition {
 // DaemonSetList is a collection of daemon sets.
 message DaemonSetList {
   // Standard list metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

@@ -366,12 +366,12 @@ message DeploymentStrategy {
 message ReplicaSet {
   // If the Labels of a ReplicaSet are empty, they are defaulted to
   // be the same as the Pod(s) that the ReplicaSet manages.
-  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

   // Spec defines the specification of the desired behavior of the ReplicaSet.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   optional ReplicaSetSpec spec = 2;

@@ -379,7 +379,7 @@ message ReplicaSet {
   // This data may be out of date by some window of time.
   // Populated by the system.
   // Read-only.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   optional ReplicaSetStatus status = 3;
 }
@@ -408,7 +408,7 @@ message ReplicaSetCondition {
 // ReplicaSetList is a collection of ReplicaSets.
 message ReplicaSetList {
   // Standard list metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
index 2fe85745..e003a0c4 100644
--- a/vendor/k8s.io/api/apps/v1/types.go
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -95,13 +95,13 @@ const (
   // ordering constraints. When a scale operation is performed with this
   // strategy, new Pods will be created from the specification version indicated
   // by the StatefulSet's updateRevision.
-  RollingUpdateStatefulSetStrategyType = "RollingUpdate"
+  RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate"

   // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
   // tracking and ordered rolling restarts are disabled. Pods are recreated
   // from the StatefulSetSpec when they are manually deleted. When a scale
   // operation is performed with this strategy,specification version indicated
   // by the StatefulSet's currentRevision.
-  OnDeleteStatefulSetStrategyType = "OnDelete"
+  OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete"
 )

 // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
@@ -618,12 +618,12 @@ type DaemonSetCondition struct {
 type DaemonSet struct {
   metav1.TypeMeta `json:",inline"`
   // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

   // The desired behavior of this daemon set.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`

@@ -631,7 +631,7 @@ type DaemonSet struct {
   // out of date by some window of time.
   // Populated by the system.
   // Read-only.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
 }
@@ -649,7 +649,7 @@ const (
 type DaemonSetList struct {
   metav1.TypeMeta `json:",inline"`
   // Standard list metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

@@ -668,12 +668,12 @@ type ReplicaSet struct {
   // If the Labels of a ReplicaSet are empty, they are defaulted to
   // be the same as the Pod(s) that the ReplicaSet manages.
-  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

   // Spec defines the specification of the desired behavior of the ReplicaSet.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`

@@ -681,7 +681,7 @@ type ReplicaSet struct {
   // This data may be out of date by some window of time.
   // Populated by the system.
   // Read-only.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
   // +optional
   Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
 }
@@ -692,7 +692,7 @@ type ReplicaSet struct {
 type ReplicaSetList struct {
   metav1.TypeMeta `json:",inline"`
   // Standard list metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
   // +optional
   metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

@@ -800,7 +800,7 @@ type ReplicaSetCondition struct {
 type ControllerRevision struct {
   metav1.TypeMeta `json:",inline"`
   // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

@@ -817,7 +817,7 @@ type ControllerRevision struct {
 type ControllerRevisionList struct {
   metav1.TypeMeta `json:",inline"`

-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index 7e992c58..3f0299d0 100644
--- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
 var map_ControllerRevision = map[string]string{
   "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
-  "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+  "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
   "data": "Data is the serialized representation of the state.",
   "revision": "Revision indicates the revision of the state represented by Data.",
 }
@@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string {

 var map_ControllerRevisionList = map[string]string{
   "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
-  "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+  "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
   "items": "Items is the list of ControllerRevisions",
 }

@@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string {

 var map_DaemonSet = map[string]string{
   "": "DaemonSet represents the configuration of a daemon set.",
-  "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
-  "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
-  "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+  "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+  "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+  "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
 }

 func (DaemonSet) SwaggerDoc() map[string]string {
@@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string {

 var map_DaemonSetList = map[string]string{
   "": "DaemonSetList is a collection of daemon sets.",
-  "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+  "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
   "items": "A list of daemon sets.",
 }

@@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string {

 var map_ReplicaSet = map[string]string{
   "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
-  "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
-  "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
-  "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+  "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+  "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+  "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
 }

 func (ReplicaSet) SwaggerDoc() map[string]string {
@@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {

 var map_ReplicaSetList = map[string]string{
   "": "ReplicaSetList is a collection of ReplicaSets.",
-  "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+  "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
   "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
 }

diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
index 885203fc..7b7ff385 100644
--- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
@@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object {
 func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
   *out = *in
   out.TypeMeta = in.TypeMeta
-  out.ListMeta = in.ListMeta
+  in.ListMeta.DeepCopyInto(&out.ListMeta)
   if in.Items != nil {
     in, out := &in.Items, &out.Items
     *out = make([]ControllerRevision, len(*in))
@@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
 func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
   *out = *in
   out.TypeMeta = in.TypeMeta
-  out.ListMeta = in.ListMeta
+  in.ListMeta.DeepCopyInto(&out.ListMeta)
   if in.Items != nil {
     in, out := &in.Items, &out.Items
     *out = make([]DaemonSet, len(*in))
@@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
 func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
   *out = *in
   out.TypeMeta = in.TypeMeta
-  out.ListMeta = in.ListMeta
+  in.ListMeta.DeepCopyInto(&out.ListMeta)
   if in.Items != nil {
     in, out := &in.Items, &out.Items
     *out = make([]Deployment, len(*in))
@@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
 func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
   *out = *in
   out.TypeMeta = in.TypeMeta
-  out.ListMeta = in.ListMeta
+  in.ListMeta.DeepCopyInto(&out.ListMeta)
   if in.Items != nil {
     in, out := &in.Items, &out.Items
     *out = make([]ReplicaSet, len(*in))
@@ -653,7 +653,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
 func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
   *out = *in
   out.TypeMeta = in.TypeMeta
-  out.ListMeta = in.ListMeta
+  in.ListMeta.DeepCopyInto(&out.ListMeta)
   if in.Items != nil {
     in, out := &in.Items, &out.Items
     *out = make([]StatefulSet, len(*in))
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
index 93530475..921e055c 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
@@ -17,52 +17,26 @@ limitations under the License.
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto

-/*
-  Package v1beta1 is a generated protocol buffer package.
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - RollbackConfig - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -75,95 +49,593 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{2} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } 
+func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{4} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{5} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{6} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{7} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus 
proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{8} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{9} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_2a07313e8f66e805, []int{10} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_2a07313e8f66e805, []int{11} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m 
*RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{12} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{13} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{14} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{15} } +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{12} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{16} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{17} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{19} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_2a07313e8f66e805, []int{20} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() } +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") @@ -172,6 +644,7 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") @@ -181,6 +654,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), 
"k8s.io.api.apps.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") @@ -188,10 +662,135 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) +} + +var fileDescriptor_2a07313e8f66e805 = []byte{ + // 1855 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, + 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, + 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, + 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, + 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, + 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, + 0xc0, 0x0f, 0xce, 0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf, + 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f, + 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36, + 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f, + 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97, + 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a, + 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0, + 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0, + 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3, + 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36, + 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46, + 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23, + 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46, + 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8, + 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c, + 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34, + 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70, + 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae, + 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67, + 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb, + 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8, + 0x63, 0xd8, 
0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9, + 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0, + 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41, + 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf, + 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57, + 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42, + 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d, + 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd, + 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed, + 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee, + 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30, + 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2, + 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20, + 0x58, 0xc6, 0xa1, 0xbc, 0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce, + 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18, + 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 0x98, 0x14, + 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0, + 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2, + 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96, + 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0, + 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe, + 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94, + 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c, + 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4, + 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a, + 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6, + 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e, + 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e, + 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b, + 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26, + 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f, + 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2, + 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba, + 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87, + 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2, + 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6, + 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 
0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a, + 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 0xc3, 0x9c, 0x06, 0x2e, + 0xb0, 0x0a, 0xe6, 0x23, 0x99, 0x00, 0x35, 0xb1, 0xd1, 0xc4, 0x7c, 0x94, 0x4b, 0x82, 0x5d, 0x58, + 0x96, 0xb9, 0x1f, 0x0a, 0x45, 0xb0, 0x26, 0xde, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, 0x17, + 0x96, 0x5c, 0x1e, 0x07, 0x11, 0xc0, 0xbc, 0x00, 0xf8, 0x96, 0x04, 0x58, 0xc2, 0x49, 0x21, 0x4e, + 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x75, 0x01, 0xf0, + 0xa2, 0x04, 0xb8, 0xb9, 0x9b, 0x55, 0xc0, 0x79, 0x1b, 0x74, 0x00, 0x2b, 0xbe, 0x95, 0x87, 0x0a, + 0x82, 0xf8, 0x25, 0x09, 0xb5, 0x72, 0x9a, 0x57, 0xc1, 0x45, 0x76, 0xe8, 0x73, 0x00, 0x2d, 0xec, + 0xea, 0x5e, 0x7b, 0x4e, 0x94, 0xe1, 0x37, 0x2b, 0x24, 0x5b, 0x34, 0x0a, 0xc4, 0x25, 0x30, 0x5a, + 0xf2, 0x70, 0x02, 0x13, 0xdd, 0x81, 0x96, 0x66, 0x1b, 0x86, 0x88, 0xfc, 0x3d, 0xdb, 0xb7, 0x98, + 0x08, 0xde, 0x46, 0x1f, 0xf1, 0x66, 0xbf, 0x97, 0x92, 0xe0, 0x8c, 0x66, 0xf7, 0x8f, 0x4a, 0xb2, + 0xcd, 0x84, 0xe9, 0x8c, 0xee, 0xa4, 0x46, 0x9f, 0x57, 0x32, 0xa3, 0xcf, 0xad, 0xbc, 0x45, 0x62, + 0xf2, 0xd1, 0x61, 0x89, 0x07, 0xbf, 0x6e, 0x8d, 0x82, 0x07, 0x97, 0x25, 0xf1, 0xad, 0x0b, 0x53, + 0x29, 0xd2, 0x4e, 0x34, 0xc6, 0x9b, 0xe2, 0xcd, 0x93, 0x42, 0x9c, 0x46, 0xee, 0x7e, 0x00, 0xad, + 0x74, 0x1e, 0xa6, 0x66, 0x7a, 0xe5, 0xd2, 0x99, 0xfe, 0x1b, 0x05, 0xd6, 0x4a, 0xbc, 0x23, 0x03, + 0x5a, 0x26, 0x79, 0x9c, 0x78, 0xe6, 0x4b, 0x67, 0x63, 0xce, 0x9a, 0xd4, 0x80, 0x35, 0xa9, 0x0f, + 0x2c, 0x76, 0xe8, 0x1e, 0x33, 0x57, 0xb7, 0x46, 0xc1, 0x3b, 0x1c, 0xa4, 0xb0, 0x70, 0x06, 0x1b, + 0x7d, 0x06, 0x4d, 0x93, 0x3c, 0x3e, 0xf6, 0xdd, 0x51, 0xd1, 0x7d, 0x55, 0xf3, 0x23, 0xfa, 0xc7, + 0x81, 0x44, 0xc1, 0x11, 0x5e, 0xf7, 0x10, 0x36, 0x53, 0x87, 0xe4, 0xa5, 0x82, 0x3e, 0xf4, 0x8d, + 0x63, 0x1a, 0x3f, 0xf8, 0x1b, 0xb0, 0xe0, 0x10, 0x97, 0xe9, 0x51, 0xb9, 0x68, 0xf4, 0x97, 0xa6, + 0xe7, 0x1b, 0x0b, 0x47, 0xe1, 0x22, 0x8e, 0xe5, 0xdd, 0x7f, 0x2b, 0xd0, 0x38, 0xd6, 0x88, 0x41, + 0xaf, 0x81, 0x3a, 0xdc, 0x4b, 0x51, 0x87, 0x6e, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, + 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, + 0xe5, 0xb2, 0x2a, 0xd9, 0xfd, 0x6d, 0x0d, 0x16, 0x13, 0x2e, 0xae, 0x66, 0xcd, 0xaf, 0x3b, 0x31, + 0x68, 0xf0, 0x4a, 0xb2, 0x53, 0xe5, 0x20, 0x6a, 0x38, 0x54, 0x04, 0xf3, 0x5b, 0xdc, 0xbd, 0xf3, + 0xb3, 0xc6, 0x07, 0xd0, 0x62, 0xc4, 0x1d, 0x51, 0x16, 0xca, 0xc4, 0x85, 0x2d, 0xc4, 0xe4, 0xe1, + 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, + 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, + 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x47, 0x0d, 0x56, 0x13, + 0xda, 0x31, 0x37, 0xfd, 0x7e, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, + 0xd3, 0x62, 0xc2, 0x38, 0xfb, 0xbf, 0x48, 0x18, 0xff, 0xa0, 0xc0, 0x72, 0xe2, 0xee, 0xae, 0x81, + 0x31, 0x3e, 0x48, 0x33, 0xc6, 0x97, 0xab, 0x04, 0x4d, 0x09, 0x65, 0xfc, 0x73, 0x23, 0xb5, 0xf9, + 0xff, 0x7b, 0x12, 0xf3, 0x4b, 0x58, 0x1d, 0xdb, 0x86, 0x6f, 0xd2, 0x3d, 0x83, 0xe8, 0x66, 0xa8, + 0xc0, 0x87, 0xbe, 0xd9, 0xec, 0x1f, 0x43, 0x11, 0x3c, 0x75, 0x3d, 0xdd, 0x63, 0xd4, 0x62, 0x9f, + 0xc6, 0x96, 0xfd, 0x6f, 0x4b, 0x27, 0xab, 0x9f, 0x16, 0xc0, 0xe1, 0x42, 0x27, 0xe8, 0x7b, 0xb0, + 0xc8, 0x07, 0x66, 0x5d, 0xa3, 0x9c, 0x7b, 0xcb, 0xc0, 0x5a, 0x91, 0x40, 0x8b, 0xc7, 0xb1, 0x08, + 0x27, 0xf5, 0xd0, 0x23, 0x58, 0x71, 0xec, 0xe1, 0x01, 0xb1, 
0xc8, 0x88, 0xf2, 0x31, 0xe3, 0xc8, + 0x36, 0x74, 0x6d, 0x22, 0x98, 0xcd, 0x42, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, 0xe3, + 0x14, 0x21, 0xbf, 0x2c, 0x92, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0x65, 0xbb, 0x97, 0x44, 0x2f, + 0xf8, 0x0b, 0x67, 0xa7, 0x4a, 0x84, 0x9d, 0xa6, 0x2c, 0xe3, 0xea, 0x9f, 0x5e, 0xc7, 0x19, 0x0f, + 0xa5, 0xc4, 0xad, 0xf9, 0xdf, 0x10, 0xb7, 0xee, 0x3f, 0xeb, 0x70, 0x33, 0x57, 0x2a, 0xd1, 0x0f, + 0x2f, 0x60, 0x38, 0xb7, 0x9e, 0x1b, 0xbb, 0xc9, 0x51, 0x93, 0xd9, 0x2b, 0x50, 0x93, 0x5d, 0x58, + 0xd6, 0x7c, 0xd7, 0xa5, 0x16, 0xcb, 0x10, 0x93, 0x88, 0x1a, 0xed, 0xa5, 0xc5, 0x38, 0xab, 0x5f, + 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, 0xbb, 0xfc, 0x2e, 0xe4, + 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, 0x83, 0xd3, 0x94, 0x14, + 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x47, 0x81, 0xc8, 0xf1, 0xed, + 0x2a, 0xb1, 0x5c, 0x99, 0x48, 0x75, 0xff, 0xa2, 0xc0, 0x8b, 0xa5, 0x49, 0x80, 0x76, 0x53, 0x2d, + 0x77, 0x3b, 0xd3, 0x72, 0xbf, 0x53, 0x6a, 0x98, 0xe8, 0xbb, 0x6e, 0x31, 0x35, 0x7a, 0xbf, 0x1a, + 0x35, 0x2a, 0x98, 0xdb, 0x2f, 0xe7, 0x48, 0xfd, 0xed, 0x27, 0x4f, 0x3b, 0x33, 0x5f, 0x3d, 0xed, + 0xcc, 0x7c, 0xfd, 0xb4, 0x33, 0xf3, 0xab, 0x69, 0x47, 0x79, 0x32, 0xed, 0x28, 0x5f, 0x4d, 0x3b, + 0xca, 0xd7, 0xd3, 0x8e, 0xf2, 0xb7, 0x69, 0x47, 0xf9, 0xcd, 0x37, 0x9d, 0x99, 0xcf, 0xe6, 0xa5, + 0xc7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x99, 0x8d, 0x1e, 0xaf, 0x61, 0x1b, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -199,36 +798,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -236,37 +844,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -274,41 +891,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -316,49 +944,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -366,37 +1007,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -404,51 +1054,61 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := 
m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -456,79 +1116,92 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n11, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n12, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n13, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x38 - i++ + i-- if m.Paused { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n14 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -536,52 +1209,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + 
dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -589,31 +1269,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -621,20 +1309,25 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -642,37 +1335,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := 
m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -680,22 +1382,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -703,41 +1410,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n18, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n19, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n20, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -745,20 +1463,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != 
nil { return nil, err } @@ -766,46 +1489,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -813,41 +1544,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n21, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n22, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n23, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n23 - return i, 
nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -855,41 +1597,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -897,37 +1650,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n25, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - 
n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -935,73 +1697,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n26, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n26 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n27, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1009,59 +1786,68 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1069,37 +1855,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } - i += n29 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1111,6 +1910,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1125,6 +1927,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1137,6 +1942,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1155,6 +1963,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1169,6 +1980,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1187,6 +2001,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1216,6 +2033,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1237,6 +2057,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1249,6 +2072,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -1256,6 +2082,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1270,6 +2099,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1279,6 +2111,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1291,6 +2126,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1298,6 +2136,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1315,6 +2156,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1327,6 +2171,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
len(m.Type) @@ -1343,6 +2190,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1357,6 +2207,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1387,6 +2240,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1413,6 +2269,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1425,14 +2284,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1442,8 +2294,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1453,9 +2305,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1465,7 +2322,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1481,8 +2338,8 @@ func (this *DeploymentCondition) String() string { 
`Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1491,9 +2348,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1526,13 +2388,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -1542,13 +2404,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, 
`Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -1561,7 +2428,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -1581,8 +2448,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1602,7 +2469,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1646,7 +2513,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1660,7 +2527,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1671,9 +2538,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1682,11 +2554,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -1699,6 +2576,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -1708,7 +2590,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1719,7 +2601,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -1747,7 +2629,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1775,7 +2657,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1784,6 +2666,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1805,7 +2690,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1814,6 +2699,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1835,7 +2723,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1849,6 +2737,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1876,7 +2767,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1904,7 +2795,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1913,6 +2804,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1934,7 +2828,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1943,6 +2837,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1960,6 +2857,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1987,7 +2887,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2015,7 +2915,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2024,6 +2924,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2045,7 +2948,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2054,6 +2957,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2075,7 +2981,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2084,6 +2990,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2100,6 +3009,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2127,7 +3039,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2155,7 +3067,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2165,6 +3077,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2184,7 +3099,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2194,6 +3109,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2213,7 +3131,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2223,6 +3141,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2242,7 +3163,7 @@ 
func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2252,6 +3173,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2271,7 +3195,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2280,6 +3204,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2301,7 +3228,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2310,6 +3237,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2326,6 +3256,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2353,7 +3286,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2381,7 +3314,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2390,6 +3323,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2411,7 +3347,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2420,6 +3356,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2437,6 +3376,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2464,7 +3406,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2492,7 +3434,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2502,6 +3444,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2521,7 +3466,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2530,6 +3475,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2550,7 +3498,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2567,7 +3515,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2577,6 +3525,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2593,7 +3544,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2603,6 +3554,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -2639,7 +3593,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2648,6 +3602,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2664,6 +3621,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2691,7 +3651,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2719,7 +3679,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2739,7 +3699,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2748,11 +3708,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
return err @@ -2772,7 +3735,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2781,6 +3744,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2802,7 +3768,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2811,6 +3777,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2832,7 +3801,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2851,7 +3820,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2871,7 +3840,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2891,7 +3860,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2900,6 +3869,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2924,7 +3896,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2939,6 +3911,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2966,7 +3941,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2994,7 +3969,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3013,7 +3988,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3032,7 +4007,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3051,7 +4026,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3070,7 +4045,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3089,7 +4064,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3098,6 +4073,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3120,7 +4098,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3139,7 +4117,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3154,6 +4132,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3181,7 +4162,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3209,7 +4190,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3219,6 +4200,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3238,7 +4222,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3247,6 +4231,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3266,6 +4253,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3293,7 +4283,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3321,7 +4311,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3335,6 +4325,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3362,7 +4355,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3390,7 
+4383,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3399,11 +4392,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3423,7 +4419,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3432,11 +4428,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3451,6 +4450,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3478,7 +4480,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3506,7 +4508,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3521,6 +4523,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3548,7 +4553,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3576,7 +4581,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3585,6 +4590,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3606,7 +4614,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3615,6 +4623,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3636,7 +4647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-3645,6 +4656,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3661,6 +4675,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3688,7 +4705,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3716,7 +4733,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3730,6 +4747,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3757,7 +4777,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3785,7 +4805,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3804,7 +4824,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3813,6 +4833,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3833,7 +4856,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3850,7 +4873,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3860,6 +4883,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -3876,7 +4902,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3886,6 +4912,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -3922,7 +4951,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3932,6 +4961,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3946,6 +4978,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3973,7 +5008,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4001,7 +5036,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4010,6 +5045,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4031,7 +5069,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4040,6 +5078,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4061,7 +5102,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4070,6 +5111,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4086,6 +5130,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4113,7 +5160,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4141,7 +5188,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4151,6 +5198,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4170,7 +5220,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4180,6 +5230,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4199,7 +5252,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4208,6 +5261,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4229,7 +5285,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4239,6 +5295,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4258,7 +5317,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4268,6 +5327,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4282,6 +5344,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4309,7 +5374,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4337,7 +5402,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4346,6 +5411,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4367,7 +5435,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4376,6 +5444,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4393,6 +5464,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4420,7 +5494,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4448,7 +5522,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4468,7 +5542,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4477,11 +5551,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4501,7 +5578,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4510,6 +5587,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4531,7 +5611,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4540,10 +5620,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -4562,7 +5645,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4572,6 +5655,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4591,7 +5677,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4601,6 +5687,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4620,7 +5709,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4629,6 +5718,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4650,7 +5742,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4665,6 +5757,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4692,7 +5787,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4720,7 +5815,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4740,7 +5835,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4759,7 +5854,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4778,7 +5873,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4797,7 +5892,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4816,7 +5911,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4826,6 +5921,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4845,7 +5943,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4855,6 +5953,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4874,7 +5975,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4894,7 +5995,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4903,6 +6004,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4920,6 +6024,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4947,7 +6054,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4975,7 +6082,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4985,6 +6092,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5004,7 +6114,7 
@@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5013,6 +6123,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5032,6 +6145,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5098,10 +6214,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -5130,6 +6249,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -5148,128 +6270,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1859 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0xd5, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0x6b, - 0x86, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, - 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, - 0x77, 0xdb, 0x6d, 0xa4, 0xf5, 0x21, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, - 0xc0, 0x8f, 0xce, 0xde, 0xf3, 0x35, 0xc3, 0xe9, 0x9e, 0x8d, 0xfa, 0xd4, 0xb3, 0x29, 0xa3, 0x7e, - 0x77, 0x4c, 0xed, 0x81, 0xe3, 0x75, 0xa5, 0x80, 0xb8, 0x46, 0x97, 0xb8, 0xae, 0xdf, 0x1d, 0xdf, - 0xee, 0x53, 0x46, 0x6e, 0x77, 0x87, 0xd4, 0xa6, 0x1e, 0x61, 0x74, 0xa0, 0xb9, 0x9e, 0xc3, 0x1c, - 0xb4, 0x16, 0x28, 0x6a, 0xc4, 0x35, 0x34, 0xae, 0xa8, 0x49, 0xc5, 0xf5, 0xed, 0xa1, 0xc1, 0x1e, - 0x8d, 0xfa, 0x9a, 0xee, 0x58, 0xdd, 0xa1, 0x33, 0x74, 0xba, 0x42, 0xbf, 0x3f, 0x7a, 0x28, 0xbe, - 0xc4, 0x87, 0xf8, 0x15, 0xe0, 0xac, 0xab, 0x09, 0x87, 0xba, 0xe3, 0xd1, 0xee, 0x38, 0xe7, 0x6b, - 0xfd, 0x9d, 0x58, 0xc7, 0x22, 0xfa, 0x23, 0xc3, 0xa6, 0xde, 0xa4, 0xeb, 0x9e, 0x0d, 0xf9, 0x82, - 0xdf, 0xb5, 0x28, 0x23, 0x45, 0x56, 0xdd, 0x32, 0x2b, 0x6f, 0x64, 0x33, 0xc3, 0xa2, 0x39, 0x83, - 0x77, 0x2f, 0x33, 0xf0, 0xf5, 0x47, 0xd4, 0x22, 0x39, 0xbb, 0xb7, 0xcb, 0xec, 0x46, 0xcc, 0x30, - 0xbb, 0x86, 0xcd, 0x7c, 0xe6, 0x65, 0x8d, 0xd4, 0xff, 0x28, 0x80, 0xf6, 0x1c, 0x9b, 0x79, 0x8e, - 0x69, 0x52, 0x0f, 0xd3, 0xb1, 0xe1, 0x1b, 0x8e, 0x8d, 0x3e, 0x87, 0x26, 0x3f, 0xcf, 0x80, 0x30, - 0xd2, 0x56, 0x36, 0x95, 0xad, 0xc5, 0x9d, 0xb7, 0xb4, 0xf8, 0xa6, 0x23, 0x78, 0xcd, 0x3d, 0x1b, - 0xf2, 0x05, 0x5f, 0xe3, 0xda, 0xda, 0xf8, 0xb6, 0x76, 0xd8, 0xff, 0x82, 0xea, 0xec, 0x80, 0x32, - 
0xd2, 0x43, 0x4f, 0xce, 0x37, 0x66, 0xa6, 0xe7, 0x1b, 0x10, 0xaf, 0xe1, 0x08, 0x15, 0x1d, 0x42, - 0x5d, 0xa0, 0xd7, 0x04, 0xfa, 0x76, 0x29, 0xba, 0x3c, 0xb4, 0x86, 0xc9, 0xaf, 0xee, 0x3f, 0x66, - 0xd4, 0xe6, 0xdb, 0xeb, 0xbd, 0x20, 0xa1, 0xeb, 0xf7, 0x08, 0x23, 0x58, 0x00, 0xa1, 0x37, 0xa1, - 0xe9, 0xc9, 0xed, 0xb7, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xde, 0x0d, 0xa9, 0xd5, 0x0c, 0x8f, 0x85, - 0x23, 0x0d, 0xf5, 0x89, 0x02, 0xb7, 0xf2, 0xe7, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0x3b, 0xbb, - 0x56, 0xed, 0xec, 0xdc, 0x5a, 0x9c, 0x3c, 0x72, 0x1c, 0xae, 0x24, 0xce, 0x7d, 0x04, 0x0d, 0x83, - 0x51, 0xcb, 0x6f, 0xd7, 0x36, 0x67, 0xb7, 0x16, 0x77, 0xde, 0xd0, 0x4a, 0x02, 0x58, 0xcb, 0xef, - 0xae, 0xb7, 0x24, 0x71, 0x1b, 0x0f, 0x38, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0xd6, 0x00, 0xee, 0x51, - 0xd7, 0x74, 0x26, 0x16, 0xb5, 0xd9, 0x35, 0x3c, 0xdd, 0x03, 0xa8, 0xfb, 0x2e, 0xd5, 0xe5, 0xd3, - 0xbd, 0x5a, 0x7a, 0x82, 0x78, 0x53, 0xc7, 0x2e, 0xd5, 0xe3, 0x47, 0xe3, 0x5f, 0x58, 0x40, 0xa0, - 0x4f, 0x60, 0xce, 0x67, 0x84, 0x8d, 0x7c, 0xf1, 0x64, 0x8b, 0x3b, 0xaf, 0x55, 0x01, 0x13, 0x06, - 0xbd, 0x96, 0x84, 0x9b, 0x0b, 0xbe, 0xb1, 0x04, 0x52, 0xff, 0x3e, 0x0b, 0x2b, 0xb1, 0xf2, 0x9e, - 0x63, 0x0f, 0x0c, 0xc6, 0x43, 0xfa, 0x2e, 0xd4, 0xd9, 0xc4, 0xa5, 0xe2, 0x4e, 0x16, 0x7a, 0xaf, - 0x86, 0x9b, 0x39, 0x99, 0xb8, 0xf4, 0xd9, 0xf9, 0xc6, 0x5a, 0x81, 0x09, 0x17, 0x61, 0x61, 0x84, - 0xf6, 0xa3, 0x7d, 0xd6, 0x84, 0xf9, 0x3b, 0x69, 0xe7, 0xcf, 0xce, 0x37, 0x0a, 0x0a, 0x88, 0x16, - 0x21, 0xa5, 0xb7, 0x88, 0x5e, 0x81, 0x39, 0x8f, 0x12, 0xdf, 0xb1, 0xdb, 0x75, 0x81, 0x16, 0x1d, - 0x05, 0x8b, 0x55, 0x2c, 0xa5, 0xe8, 0x35, 0x98, 0xb7, 0xa8, 0xef, 0x93, 0x21, 0x6d, 0x37, 0x84, - 0xe2, 0xb2, 0x54, 0x9c, 0x3f, 0x08, 0x96, 0x71, 0x28, 0x47, 0x5f, 0x40, 0xcb, 0x24, 0x3e, 0x3b, - 0x75, 0x07, 0x84, 0xd1, 0x13, 0xc3, 0xa2, 0xed, 0x39, 0x71, 0xa1, 0xaf, 0x57, 0x7b, 0x7b, 0x6e, - 0xd1, 0xbb, 0x25, 0xd1, 0x5b, 0xfb, 0x29, 0x24, 0x9c, 0x41, 0x46, 0x63, 0x40, 0x7c, 0xe5, 0xc4, - 0x23, 0xb6, 0x1f, 0x5c, 0x14, 0xf7, 0x37, 0x7f, 0x65, 0x7f, 0xeb, 0xd2, 0x1f, 0xda, 0xcf, 0xa1, - 0xe1, 0x02, 0x0f, 0xea, 0x1f, 0x15, 0x68, 0xc5, 0xcf, 0x74, 0x0d, 0xb9, 0xfa, 0x51, 0x3a, 0x57, - 0xbf, 0x5f, 0x21, 0x38, 0x4b, 0x72, 0xf4, 0x9f, 0x35, 0x40, 0xb1, 0x12, 0x76, 0x4c, 0xb3, 0x4f, - 0xf4, 0x33, 0xb4, 0x09, 0x75, 0x9b, 0x58, 0x61, 0x4c, 0x46, 0x09, 0xf2, 0x13, 0x62, 0x51, 0x2c, - 0x24, 0xe8, 0x4b, 0x05, 0xd0, 0x48, 0x5c, 0xfd, 0x60, 0xd7, 0xb6, 0x1d, 0x46, 0xf8, 0x6d, 0x84, - 0x1b, 0xda, 0xab, 0xb0, 0xa1, 0xd0, 0x97, 0x76, 0x9a, 0x43, 0xb9, 0x6f, 0x33, 0x6f, 0x12, 0xbf, - 0x42, 0x5e, 0x01, 0x17, 0xb8, 0x46, 0x3f, 0x07, 0xf0, 0x24, 0xe6, 0x89, 0x23, 0xd3, 0xb6, 0xbc, - 0x06, 0x84, 0xee, 0xf7, 0x1c, 0xfb, 0xa1, 0x31, 0x8c, 0x0b, 0x0b, 0x8e, 0x20, 0x70, 0x02, 0x6e, - 0xfd, 0x3e, 0xac, 0x95, 0xec, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0xae, 0x0a, 0xf3, 0x9f, - 0x68, 0x15, 0x1a, 0x63, 0x62, 0x8e, 0x68, 0x90, 0x93, 0x38, 0xf8, 0xb8, 0x53, 0x7b, 0x4f, 0x51, - 0xff, 0xd0, 0x48, 0x46, 0x0a, 0xaf, 0x37, 0x68, 0x8b, 0xb7, 0x07, 0xd7, 0x34, 0x74, 0xe2, 0x0b, - 0x8c, 0x46, 0xef, 0x85, 0xa0, 0x35, 0x04, 0x6b, 0x38, 0x92, 0xa2, 0x5f, 0x42, 0xd3, 0xa7, 0x26, - 0xd5, 0x99, 0xe3, 0xc9, 0x12, 0xf7, 0x76, 0xc5, 0x98, 0x22, 0x7d, 0x6a, 0x1e, 0x4b, 0xd3, 0x00, - 0x3e, 0xfc, 0xc2, 0x11, 0x24, 0xfa, 0x04, 0x9a, 0x8c, 0x5a, 0xae, 0x49, 0x18, 0x95, 0xb7, 0x97, - 0x8a, 0x2b, 0x5e, 0x3b, 0x38, 0xd8, 0x91, 0x33, 0x38, 0x91, 0x6a, 0xa2, 0x7a, 0x46, 0x71, 0x1a, - 0xae, 0xe2, 0x08, 0x06, 0xfd, 0x0c, 0x9a, 0x3e, 0xe3, 0x5d, 0x7d, 0x38, 0x11, 0x15, 0xe5, 0xa2, - 0xb6, 0x92, 0xac, 0xa3, 
0x81, 0x49, 0x0c, 0x1d, 0xae, 0xe0, 0x08, 0x0e, 0xed, 0xc2, 0xb2, 0x65, - 0xd8, 0x98, 0x92, 0xc1, 0xe4, 0x98, 0xea, 0x8e, 0x3d, 0xf0, 0x45, 0x29, 0x6a, 0xf4, 0xd6, 0xa4, - 0xd1, 0xf2, 0x41, 0x5a, 0x8c, 0xb3, 0xfa, 0x68, 0x1f, 0x56, 0xc3, 0xb6, 0xfb, 0x91, 0xe1, 0x33, - 0xc7, 0x9b, 0xec, 0x1b, 0x96, 0xc1, 0x44, 0x81, 0x6a, 0xf4, 0xda, 0xd3, 0xf3, 0x8d, 0x55, 0x5c, - 0x20, 0xc7, 0x85, 0x56, 0xbc, 0x76, 0xba, 0x64, 0xe4, 0xd3, 0x81, 0x28, 0x38, 0xcd, 0xb8, 0x76, - 0x1e, 0x89, 0x55, 0x2c, 0xa5, 0xe8, 0xa7, 0xa9, 0x30, 0x6d, 0x5e, 0x2d, 0x4c, 0x5b, 0xe5, 0x21, - 0x8a, 0x4e, 0x61, 0xcd, 0xf5, 0x9c, 0xa1, 0x47, 0x7d, 0xff, 0x1e, 0x25, 0x03, 0xd3, 0xb0, 0x69, - 0x78, 0x33, 0x0b, 0xe2, 0x44, 0x2f, 0x4d, 0xcf, 0x37, 0xd6, 0x8e, 0x8a, 0x55, 0x70, 0x99, 0xad, - 0xfa, 0x97, 0x3a, 0xdc, 0xc8, 0xf6, 0x38, 0xf4, 0x31, 0x20, 0xa7, 0xef, 0x53, 0x6f, 0x4c, 0x07, - 0x1f, 0x06, 0x83, 0x1b, 0x9f, 0x6e, 0x14, 0x31, 0xdd, 0x44, 0x79, 0x7b, 0x98, 0xd3, 0xc0, 0x05, - 0x56, 0xc1, 0x7c, 0x24, 0x13, 0xa0, 0x26, 0x36, 0x9a, 0x98, 0x8f, 0x72, 0x49, 0xb0, 0x0b, 0xcb, - 0x32, 0xf7, 0x43, 0xa1, 0x08, 0xd6, 0xc4, 0xbb, 0x9f, 0xa6, 0xc5, 0x38, 0xab, 0x8f, 0x3e, 0x84, - 0x9b, 0x64, 0x4c, 0x0c, 0x93, 0xf4, 0x4d, 0x1a, 0x81, 0xd4, 0x05, 0xc8, 0x8b, 0x12, 0xe4, 0xe6, - 0x6e, 0x56, 0x01, 0xe7, 0x6d, 0xd0, 0x01, 0xac, 0x8c, 0xec, 0x3c, 0x54, 0x10, 0x87, 0x2f, 0x49, - 0xa8, 0x95, 0xd3, 0xbc, 0x0a, 0x2e, 0xb2, 0x43, 0x9f, 0x03, 0xe8, 0x61, 0x63, 0xf6, 0xdb, 0x73, - 0xa2, 0x92, 0xbe, 0x59, 0x21, 0x5f, 0xa2, 0x6e, 0x1e, 0x57, 0xb1, 0x68, 0xc9, 0xc7, 0x09, 0x4c, - 0x74, 0x17, 0x96, 0x3c, 0x9e, 0x01, 0xd1, 0x56, 0xe7, 0xc5, 0x56, 0xbf, 0x23, 0xcd, 0x96, 0x70, - 0x52, 0x88, 0xd3, 0xba, 0xe8, 0x0e, 0xb4, 0x74, 0xc7, 0x34, 0x45, 0xe4, 0xef, 0x39, 0x23, 0x9b, - 0x89, 0xe0, 0x6d, 0xf4, 0x10, 0xef, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, - 0x6d, 0x26, 0x4c, 0x67, 0x74, 0x27, 0x35, 0xfa, 0xbc, 0x92, 0x19, 0x7d, 0x6e, 0xe5, 0x2d, 0x12, - 0x93, 0x8f, 0x01, 0x4b, 0x3c, 0xf8, 0x0d, 0x7b, 0x18, 0x3c, 0xb8, 0x2c, 0x89, 0x6f, 0x5d, 0x98, - 0x4a, 0x91, 0x76, 0xa2, 0x31, 0xde, 0x14, 0x27, 0x4f, 0x0a, 0x71, 0x1a, 0x59, 0xfd, 0x00, 0x5a, - 0xe9, 0x3c, 0x4c, 0xcd, 0xf4, 0xca, 0xa5, 0x33, 0xfd, 0x37, 0x0a, 0xac, 0x95, 0x78, 0x47, 0x26, - 0xb4, 0x2c, 0xf2, 0x38, 0x11, 0x23, 0x97, 0xce, 0xc6, 0x9c, 0x35, 0x69, 0x01, 0x6b, 0xd2, 0x1e, - 0xd8, 0xec, 0xd0, 0x3b, 0x66, 0x9e, 0x61, 0x0f, 0x83, 0x77, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, - 0xfa, 0x0c, 0x9a, 0x16, 0x79, 0x7c, 0x3c, 0xf2, 0x86, 0x45, 0xf7, 0x55, 0xcd, 0x8f, 0xe8, 0x1f, - 0x07, 0x12, 0x05, 0x47, 0x78, 0xea, 0x21, 0x6c, 0xa6, 0x0e, 0xc9, 0x4b, 0x05, 0x7d, 0x38, 0x32, - 0x8f, 0x69, 0xfc, 0xe0, 0x6f, 0xc0, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xe5, 0xa2, 0xd1, 0x5b, 0x9a, - 0x9e, 0x6f, 0x2c, 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0x71, 0xac, 0x13, 0x93, - 0x5e, 0x03, 0x75, 0xb8, 0x97, 0xa2, 0x0e, 0x6a, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, - 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, - 0xe5, 0xb2, 0x2a, 0xa9, 0xfe, 0xbe, 0x06, 0x8b, 0x09, 0x17, 0x57, 0xb3, 0xe6, 0xd7, 0x9d, 0x18, - 0x34, 0x78, 0x19, 0xda, 0xa9, 0x72, 0x10, 0x2d, 0x1c, 0x2a, 0x82, 0xf9, 0x2d, 0xee, 0xde, 0xf9, - 0x59, 0xe3, 0x03, 0x68, 0x31, 0xe2, 0x0d, 0x29, 0x0b, 0x65, 0xe2, 0xc2, 0x16, 0xe2, 0x49, 0xff, - 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, - 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, - 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 
0xed, 0xe2, 0x48, 0xfb, 0x57, 0x0d, 0x56, 0x13, - 0xda, 0x31, 0x37, 0xfd, 0x61, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, - 0xd3, 0x62, 0x76, 0x37, 0xfb, 0xbc, 0xd9, 0xdd, 0x73, 0x20, 0xc5, 0xea, 0x9f, 0x14, 0x58, 0x4e, - 0xdc, 0xdd, 0x35, 0x30, 0xc6, 0x07, 0x69, 0xc6, 0xf8, 0x72, 0x95, 0xa0, 0x29, 0xa1, 0x8c, 0x7f, - 0x6d, 0xa4, 0x36, 0xff, 0xad, 0x27, 0x31, 0xbf, 0x86, 0xd5, 0xb1, 0x63, 0x8e, 0x2c, 0xba, 0x67, - 0x12, 0xc3, 0x0a, 0x15, 0xf8, 0xc4, 0x38, 0x9b, 0xfd, 0x63, 0x28, 0x82, 0xa7, 0x9e, 0x6f, 0xf8, - 0x8c, 0xda, 0xec, 0xd3, 0xd8, 0xb2, 0xf7, 0x5d, 0xe9, 0x64, 0xf5, 0xd3, 0x02, 0x38, 0x5c, 0xe8, - 0x04, 0xfd, 0x00, 0x16, 0xf9, 0xc0, 0x6c, 0xe8, 0x94, 0x73, 0x6f, 0x19, 0x58, 0x2b, 0x12, 0x68, - 0xf1, 0x38, 0x16, 0xe1, 0xa4, 0x1e, 0x7a, 0x04, 0x2b, 0xae, 0x33, 0x38, 0x20, 0x36, 0x19, 0x52, - 0x3e, 0x66, 0x1c, 0x39, 0xa6, 0xa1, 0x4f, 0x04, 0xb3, 0x59, 0xe8, 0xbd, 0x1b, 0x4e, 0xa6, 0x47, - 0x79, 0x95, 0x67, 0x9c, 0x22, 0xe4, 0x97, 0x45, 0x52, 0x17, 0x41, 0x22, 0x0f, 0x5a, 0x23, 0xd9, - 0xee, 0x25, 0xd1, 0x0b, 0xfe, 0x6f, 0xd9, 0xa9, 0x12, 0x61, 0xa7, 0x29, 0xcb, 0xb8, 0xfa, 0xa7, - 0xd7, 0x71, 0xc6, 0x43, 0x29, 0x71, 0x6b, 0xfe, 0x3f, 0xc4, 0x4d, 0xfd, 0x77, 0x1d, 0x6e, 0xe6, - 0x4a, 0x25, 0xfa, 0xf1, 0x05, 0x0c, 0xe7, 0xd6, 0x73, 0x63, 0x37, 0xb9, 0x01, 0x7d, 0xf6, 0x0a, - 0x03, 0xfa, 0x2e, 0x2c, 0xeb, 0x23, 0xcf, 0xa3, 0x36, 0xcb, 0xb0, 0x9a, 0x88, 0x1a, 0xed, 0xa5, - 0xc5, 0x38, 0xab, 0x5f, 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, - 0xbb, 0xfc, 0x2e, 0xe4, 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, - 0x83, 0xd3, 0x94, 0x14, 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x84, - 0x81, 0xc8, 0xf1, 0xed, 0x2a, 0xb1, 0x5c, 0x99, 0x85, 0xa9, 0x7f, 0x53, 0xe0, 0xc5, 0xd2, 0x24, - 0x40, 0xbb, 0xa9, 0x96, 0xbb, 0x9d, 0x69, 0xb9, 0xdf, 0x2b, 0x35, 0x4c, 0xf4, 0x5d, 0xaf, 0x98, - 0x1a, 0xbd, 0x5f, 0x8d, 0x1a, 0x15, 0xcc, 0xed, 0x97, 0x73, 0xa4, 0xde, 0xf6, 0x93, 0xa7, 0x9d, - 0x99, 0xaf, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9, 0xcd, 0xb4, 0xa3, 0x3c, 0x99, 0x76, - 0x94, 0xaf, 0xa6, 0x1d, 0xe5, 0xeb, 0x69, 0x47, 0xf9, 0xc7, 0xb4, 0xa3, 0xfc, 0xee, 0x9b, 0xce, - 0xcc, 0x67, 0xf3, 0xd2, 0xe3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x89, 0x29, 0x5c, 0x61, - 0x1b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 7942b997..694f6570 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. 
message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -277,15 +277,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index cf6039df..b77fcf7a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -60,15 +60,15 @@ type ScaleStatus struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -134,13 +134,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. 
When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -541,7 +541,7 @@ type DeploymentList struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -558,7 +558,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index da1eb599..504b8586 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -167,9 +167,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index 93892bfd..fb276124 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -137,7 +137,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -470,7 +470,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index fc1efbc9..624bb942 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -17,62 +17,26 @@ limitations under the License. 
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto -/* - Package v1beta2 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta2 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -85,135 +49,873 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{3} +} +func (m 
*DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) 
ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } 
-func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func 
(*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) 
XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_42fe616264472f7e, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_42fe616264472f7e, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{22} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *StatefulSet) Reset() { *m = StatefulSet{} } 
-func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{23} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{24} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{25} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + 
return fileDescriptor_42fe616264472f7e, []int{26} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{27} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{29} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} + 
return fileDescriptor_42fe616264472f7e, []int{30} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() } +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") @@ -241,6 +943,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") @@ -248,10 +951,155 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) +} + +var fileDescriptor_42fe616264472f7e = []byte{ + // 2171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, + 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63, + 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81, + 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3, + 0xf5, 0xa2, 0x97, 0x9e, 0x0a, 0x14, 0x28, 0xd0, 0xf6, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6f, + 0x45, 0x50, 0xf8, 0x52, 0x20, 0xe8, 0x25, 0x39, 0x09, 0xf5, 0xe6, 0x54, 0x14, 0xbd, 0x14, 0xe8, + 0x25, 0x40, 0x81, 0x82, 0x1c, 0xce, 0x07, 0xe7, 0xc3, 0x3b, 0x52, 0x1c, 0xa5, 0x29, 0x72, 0xd3, + 0x92, 0xcf, 0xfb, 0xf0, 0x7d, 0xc9, 0x97, 0x7c, 0x1f, 0x72, 0x04, 0xbe, 0x73, 0xfc, 0x96, 0xab, + 0x6a, 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, + 0x25, 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x43, 0x42, 0xf1, 0x5a, 0xab, + 0x47, 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, + 0xa6, 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, + 0x3d, 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc4, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x9f, 0x67, + 0xa9, 0x19, 0x1b, 0xb0, 0x63, 0x39, 0xa4, 0xd5, 0xbf, 0x99, 0x1c, 0x6b, 0xe9, 0xf5, 0x08, 0x63, + 0xe0, 0xce, 0x91, 0x66, 0x12, 0x67, 0xd0, 0xb2, 0x8f, 0x7b, 0xac, 0xc1, 0x6d, 0x19, 0x84, 0xe2, + 0x2c, 0xab, 0x56, 0x9e, 0x95, 0xe3, 0x99, 0x54, 0x33, 0x48, 0xca, 0xe0, 0xcd, 0x51, 0x06, 0x6e, + 0xe7, 0x88, 
0x18, 0x38, 0x65, 0xf7, 0x5a, 0x9e, 0x9d, 0x47, 0x35, 0xbd, 0xa5, 0x99, 0xd4, 0xa5, + 0x4e, 0xd2, 0xa8, 0xf9, 0x2f, 0x05, 0xc0, 0x0d, 0xcb, 0xa4, 0x8e, 0xa5, 0xeb, 0xc4, 0x41, 0xa4, + 0xaf, 0xb9, 0x9a, 0x65, 0xc2, 0x87, 0xa0, 0xc6, 0xe2, 0xe9, 0x62, 0x8a, 0xeb, 0xca, 0x15, 0x65, + 0x65, 0x6a, 0xed, 0x86, 0x1a, 0xcd, 0x74, 0x48, 0xaf, 0xda, 0xc7, 0x3d, 0xd6, 0xe0, 0xaa, 0x0c, + 0xad, 0xf6, 0x6f, 0xaa, 0xbb, 0x87, 0x1f, 0x90, 0x0e, 0xdd, 0x26, 0x14, 0xb7, 0xe1, 0x93, 0x93, + 0xe5, 0xb1, 0xe1, 0xc9, 0x32, 0x88, 0xda, 0x50, 0xc8, 0x0a, 0x77, 0x41, 0x85, 0xb3, 0x97, 0x38, + 0xfb, 0x6a, 0x2e, 0xbb, 0x08, 0x5a, 0x45, 0xf8, 0x47, 0xef, 0x3c, 0xa6, 0xc4, 0x64, 0xee, 0xb5, + 0x2f, 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x1d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, + 0x7c, 0x45, 0x59, 0x29, 0xb7, 0x2f, 0x0a, 0x54, 0x2d, 0x08, 0x0b, 0x85, 0x88, 0xe6, 0x13, 0x05, + 0x2c, 0xa4, 0xe3, 0xde, 0xd2, 0x5c, 0x0a, 0xbf, 0x9f, 0x8a, 0x5d, 0x2d, 0x16, 0x3b, 0xb3, 0xe6, + 0x91, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0xb8, 0xf7, 0x40, 0x55, 0xa3, 0xc4, 0x70, 0xeb, 0xa5, 0x2b, + 0xe5, 0x95, 0xa9, 0xb5, 0x6b, 0x6a, 0x4e, 0x02, 0xab, 0x69, 0xef, 0xda, 0xd3, 0x82, 0xb7, 0x7a, + 0x8f, 0x31, 0x20, 0x9f, 0xa8, 0xf9, 0xb3, 0x12, 0x98, 0xdc, 0xc4, 0xc4, 0xb0, 0xcc, 0x7d, 0x42, + 0xcf, 0x61, 0xe5, 0xee, 0x82, 0x8a, 0x6b, 0x93, 0x8e, 0x58, 0xb9, 0xab, 0xb9, 0x01, 0x84, 0x3e, + 0xed, 0xdb, 0xa4, 0x13, 0x2d, 0x19, 0xfb, 0x85, 0x38, 0x03, 0xdc, 0x03, 0xe3, 0x2e, 0xc5, 0xd4, + 0x73, 0xf9, 0x82, 0x4d, 0xad, 0xad, 0x14, 0xe0, 0xe2, 0xf8, 0xf6, 0x8c, 0x60, 0x1b, 0xf7, 0x7f, + 0x23, 0xc1, 0xd3, 0xfc, 0x5b, 0x09, 0xc0, 0x10, 0xbb, 0x61, 0x99, 0x5d, 0x8d, 0xb2, 0x74, 0xbe, + 0x05, 0x2a, 0x74, 0x60, 0x13, 0x3e, 0x21, 0x93, 0xed, 0xab, 0x81, 0x2b, 0xf7, 0x07, 0x36, 0xf9, + 0xec, 0x64, 0x79, 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x5b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, + 0xba, 0x3c, 0xf4, 0x67, 0x27, 0xcb, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x3e, 0x80, + 0x3a, 0x76, 0xe9, 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, + 0x8a, 0x59, 0xb4, 0x97, 0x84, 0x17, 0x70, 0x2b, 0xc5, 0x86, 0x32, 0x46, 0x80, 0x57, 0xc1, 0xb8, + 0x43, 0xb0, 0x6b, 0x99, 0xf5, 0x0a, 0x8f, 0x22, 0x9c, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0xaf, + 0x80, 0x09, 0x83, 0xb8, 0x2e, 0xee, 0x91, 0x7a, 0x95, 0x03, 0x67, 0x05, 0x70, 0x62, 0xdb, 0x6f, + 0x46, 0x41, 0x7f, 0xf3, 0xb7, 0x0a, 0x98, 0x0e, 0x67, 0xee, 0x1c, 0x76, 0xce, 0x1d, 0x79, 0xe7, + 0x34, 0x47, 0x27, 0x4b, 0xce, 0x86, 0xf9, 0xb0, 0x1c, 0x73, 0x9c, 0xa5, 0x23, 0xfc, 0x01, 0xa8, + 0xb9, 0x44, 0x27, 0x1d, 0x6a, 0x39, 0xc2, 0xf1, 0xd7, 0x0a, 0x3a, 0x8e, 0x0f, 0x89, 0xbe, 0x2f, + 0x4c, 0xdb, 0x17, 0x98, 0xe7, 0xc1, 0x2f, 0x14, 0x52, 0xc2, 0xf7, 0x40, 0x8d, 0x12, 0xc3, 0xd6, + 0x31, 0x25, 0x62, 0xd7, 0xbc, 0x18, 0x77, 0x9e, 0xe5, 0x0c, 0x23, 0xdb, 0xb3, 0xba, 0xf7, 0x05, + 0x8c, 0x6f, 0x99, 0x70, 0x32, 0x82, 0x56, 0x14, 0xd2, 0x40, 0x1b, 0xcc, 0x78, 0x76, 0x97, 0x21, + 0x29, 0x3b, 0xce, 0x7b, 0x03, 0x91, 0x43, 0x37, 0x46, 0xcf, 0xca, 0x81, 0x64, 0xd7, 0x5e, 0x10, + 0xa3, 0xcc, 0xc8, 0xed, 0x28, 0xc1, 0x0f, 0xd7, 0xc1, 0xac, 0xa1, 0x99, 0x88, 0xe0, 0xee, 0x60, + 0x9f, 0x74, 0x2c, 0xb3, 0xeb, 0xf2, 0x54, 0xaa, 0xb6, 0x17, 0x05, 0xc1, 0xec, 0xb6, 0xdc, 0x8d, + 0x92, 0x78, 0xb8, 0x05, 0xe6, 0x83, 0x03, 0xf8, 0xae, 0xe6, 0x52, 0xcb, 0x19, 0x6c, 0x69, 0x86, + 0x46, 0xeb, 0xe3, 0x9c, 0xa7, 0x3e, 0x3c, 0x59, 0x9e, 0x47, 0x19, 0xfd, 0x28, 0xd3, 0xaa, 0xf9, + 0xab, 0x71, 0x30, 0x9b, 0x38, 0x17, 0xe0, 0x03, 0xb0, 0xd0, 0xf1, 0x1c, 0x87, 0x98, 0x74, 0xc7, + 0x33, 0x0e, 0x89, 0xb3, 0xdf, 0x39, 
0x22, 0x5d, 0x4f, 0x27, 0x5d, 0xbe, 0xac, 0xd5, 0x76, 0x43, + 0xf8, 0xba, 0xb0, 0x91, 0x89, 0x42, 0x39, 0xd6, 0xf0, 0x5d, 0x00, 0x4d, 0xde, 0xb4, 0xad, 0xb9, + 0x6e, 0xc8, 0x59, 0xe2, 0x9c, 0xe1, 0x56, 0xdc, 0x49, 0x21, 0x50, 0x86, 0x15, 0xf3, 0xb1, 0x4b, + 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, 0x65, 0xd9, 0xc7, 0xcd, 0x4c, 0x14, 0xca, 0xb1, 0x86, 0x6f, + 0x80, 0x29, 0x7f, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0xe6, 0x04, 0xd9, 0xd4, 0x4e, 0xd4, 0x85, 0xe2, + 0x38, 0x16, 0x9a, 0x75, 0xe8, 0x12, 0xa7, 0x4f, 0xba, 0x77, 0x7c, 0x71, 0xc0, 0x2a, 0x68, 0x95, + 0x57, 0xd0, 0x30, 0xb4, 0xdd, 0x14, 0x02, 0x65, 0x58, 0xb1, 0xd0, 0xfc, 0xac, 0x49, 0x85, 0x36, + 0x2e, 0x87, 0x76, 0x90, 0x89, 0x42, 0x39, 0xd6, 0x2c, 0xf7, 0x7c, 0x97, 0xd7, 0xfb, 0x58, 0xd3, + 0xf1, 0xa1, 0x4e, 0xea, 0x13, 0x72, 0xee, 0xed, 0xc8, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x25, + 0xbf, 0xe9, 0xc0, 0xc4, 0x21, 0x49, 0x8d, 0x93, 0xbc, 0x20, 0x48, 0x2e, 0xed, 0x24, 0x01, 0x28, + 0x6d, 0x03, 0x6f, 0x81, 0x99, 0x8e, 0xa5, 0xeb, 0x3c, 0x1f, 0x37, 0x2c, 0xcf, 0xa4, 0xf5, 0x49, + 0xce, 0x02, 0xd9, 0x1e, 0xda, 0x90, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x08, 0x40, 0x27, 0x28, 0x0c, + 0x6e, 0x1d, 0x8c, 0x50, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xe6, + 0x87, 0x0a, 0x58, 0xcc, 0xd9, 0xe8, 0xf0, 0xdb, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, + 0xcc, 0x62, 0x95, 0xf0, 0x08, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x3d, 0x1f, 0x22, 0xce, 0xb2, 0x56, + 0x6e, 0x00, 0x28, 0x8e, 0x8e, 0x4e, 0xe5, 0x4b, 0xc3, 0x93, 0xe5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, + 0xcd, 0x9f, 0x97, 0x00, 0xd8, 0x24, 0xb6, 0x6e, 0x0d, 0x0c, 0x62, 0x9e, 0x87, 0xa6, 0xb9, 0x27, + 0x69, 0x9a, 0x97, 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, + 0x21, 0x7b, 0xb6, 0xaa, 0xf9, 0xb8, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, + 0x72, 0x62, 0x45, 0x17, 0x33, 0x4c, 0xbe, 0x30, 0x5d, 0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, + 0x3f, 0xae, 0x69, 0xc6, 0x4f, 0xad, 0x69, 0xc2, 0x4a, 0xb4, 0x25, 0x31, 0xa1, 0x04, 0x73, 0x8e, + 0x86, 0x9a, 0xf8, 0x2a, 0x6a, 0xa8, 0xdf, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x73, 0x10, 0x51, 0x77, + 0x65, 0x11, 0xf5, 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x1f, 0x57, 0xe2, 0xae, 0x73, 0x19, 0xb5, + 0xc2, 0xae, 0x60, 0xb6, 0xae, 0x75, 0xb0, 0x2b, 0xea, 0xed, 0x05, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, + 0xb0, 0x57, 0x12, 0x5c, 0xa5, 0x2f, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x1e, 0xa8, 0xb9, + 0x81, 0xd4, 0xaa, 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, + 0xba, 0x2c, 0x65, 0x55, 0xfd, 0x32, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf2, 0x4d, + 0x55, 0x8b, 0x12, 0x7d, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, 0xed, 0x58, 0x3d, 0x87, + 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x0f, 0x4f, 0x96, + 0x17, 0xf7, 0xb2, 0x21, 0x28, 0xcf, 0xb6, 0xf9, 0xc7, 0x0a, 0xb8, 0x98, 0x3c, 0x1b, 0x73, 0x64, + 0x8a, 0x72, 0x26, 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, + 0x1d, 0xcc, 0x0a, 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0x81, 0xdc, 0x8d, 0x92, 0x78, + 0x78, 0x1b, 0x4c, 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, + 0x9d, 0x48, 0xc6, 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xf5, 0x24, 0x00, + 0xa5, 0x6d, 0xe0, 0x36, 0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x41, + 0x1a, 0x82, 0xb2, 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x8c, 0xf3, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, + 0x58, 0xcd, 0x64, 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfc, 
0x83, 0x02, 0x60, 0x7a, 0x1f, 0x8e, + 0x7c, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, + 0x5a, 0x4c, 0x00, 0x89, 0x89, 0x3e, 0x9f, 0x47, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, + 0xa0, 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0xdf, 0x4b, 0x60, 0x2e, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, + 0x5f, 0x3f, 0xec, 0x14, 0x13, 0x25, 0xd1, 0xd4, 0xfd, 0x37, 0x89, 0x92, 0xc8, 0xab, 0x1c, 0x51, + 0xf2, 0x9b, 0x52, 0xdc, 0xf5, 0x53, 0x8a, 0x92, 0xe7, 0xf0, 0xc2, 0xf1, 0x95, 0xd3, 0x35, 0xcd, + 0x3f, 0x95, 0xc1, 0xc5, 0xe4, 0x3e, 0x94, 0x0a, 0xa4, 0x32, 0xb2, 0x40, 0xee, 0x81, 0xf9, 0x47, + 0x9e, 0xae, 0x0f, 0x78, 0x0c, 0xb1, 0x2a, 0xe9, 0x97, 0xd6, 0xff, 0x17, 0x96, 0xf3, 0xdf, 0xcd, + 0xc0, 0xa0, 0x4c, 0xcb, 0x74, 0xbd, 0xac, 0x7c, 0xde, 0x7a, 0x59, 0x3d, 0x43, 0xbd, 0xcc, 0x96, + 0x1c, 0xe5, 0x33, 0x49, 0x8e, 0xd3, 0x15, 0xcb, 0x8c, 0x83, 0x6b, 0xe4, 0xd5, 0xff, 0xa7, 0x0a, + 0x58, 0xc8, 0xbe, 0x70, 0x43, 0x1d, 0xcc, 0x18, 0xf8, 0x71, 0xfc, 0xe1, 0x63, 0x54, 0x11, 0xf1, + 0xa8, 0xa6, 0xab, 0xfe, 0x27, 0x23, 0xf5, 0x9e, 0x49, 0x77, 0x9d, 0x7d, 0xea, 0x68, 0x66, 0xcf, + 0xaf, 0xbc, 0xdb, 0x12, 0x17, 0x4a, 0x70, 0x37, 0x3f, 0x55, 0xc0, 0x62, 0x4e, 0xe5, 0x3b, 0x5f, + 0x4f, 0xe0, 0xfb, 0xa0, 0x66, 0xe0, 0xc7, 0xfb, 0x9e, 0xd3, 0xcb, 0xaa, 0xd5, 0xc5, 0xc6, 0xe1, + 0x5b, 0x71, 0x5b, 0xb0, 0xa0, 0x90, 0xaf, 0xb9, 0x0b, 0xae, 0x48, 0x41, 0xb2, 0x9d, 0x43, 0x1e, + 0x79, 0x3a, 0xdf, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x93, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, + 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x6f, 0x05, 0x54, 0xf7, + 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, 0xff, 0x92, 0xce, 0xfd, 0xc9, 0x2d, + 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xd2, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0xb7, 0xc1, 0x64, 0x38, 0xdc, + 0xe9, 0x0e, 0xa0, 0xe6, 0xaf, 0x4b, 0x60, 0x2a, 0x36, 0xc4, 0x29, 0x8f, 0xaf, 0x87, 0xd2, 0x99, + 0xcd, 0x36, 0xe6, 0x5a, 0x91, 0x40, 0xd4, 0xe0, 0x7c, 0x7e, 0xc7, 0xa4, 0x4e, 0xfc, 0x82, 0x97, + 0x3e, 0xb6, 0xbf, 0x05, 0x66, 0x28, 0x76, 0x7a, 0x84, 0x06, 0x7d, 0x7c, 0xc2, 0x26, 0xa3, 0x07, + 0x8f, 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0x97, 0x6e, 0x83, 0x69, 0x69, 0x30, 0x78, 0x11, 0x94, 0x8f, + 0xc9, 0xc0, 0x97, 0x3d, 0x88, 0xfd, 0x09, 0xe7, 0x41, 0xb5, 0x8f, 0x75, 0xcf, 0xcf, 0xf3, 0x49, + 0xe4, 0xff, 0xb8, 0x55, 0x7a, 0x4b, 0x69, 0xfe, 0x82, 0x4d, 0x4e, 0x94, 0x9c, 0xe7, 0x90, 0x5d, + 0xef, 0x4a, 0xd9, 0x95, 0xff, 0x51, 0x2f, 0xbe, 0x65, 0xf2, 0x72, 0x0c, 0x25, 0x72, 0xec, 0xd5, + 0x42, 0x6c, 0xcf, 0xce, 0xb4, 0x7f, 0x94, 0xc0, 0x7c, 0x0c, 0x1d, 0xc9, 0xc9, 0x6f, 0x4a, 0x72, + 0x72, 0x25, 0x21, 0x27, 0xeb, 0x59, 0x36, 0x5f, 0xeb, 0xc9, 0xd1, 0x7a, 0xf2, 0xf7, 0x0a, 0x98, + 0x8d, 0xcd, 0xdd, 0x39, 0x08, 0xca, 0x7b, 0xb2, 0xa0, 0x7c, 0xa9, 0x48, 0xd2, 0xe4, 0x28, 0xca, + 0x3f, 0x57, 0x25, 0xe7, 0xff, 0xe7, 0xdf, 0xb9, 0x7e, 0x0c, 0xe6, 0xfb, 0x96, 0xee, 0x19, 0x64, + 0x43, 0xc7, 0x9a, 0x11, 0x00, 0x98, 0x02, 0x2b, 0x27, 0xef, 0x72, 0x21, 0x3d, 0x71, 0x5c, 0xcd, + 0xa5, 0xc4, 0xa4, 0x0f, 0x22, 0xcb, 0x48, 0xf7, 0x3d, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, + 0xc0, 0x14, 0x53, 0x4e, 0x5a, 0x87, 0xec, 0x60, 0x23, 0x48, 0xac, 0xf0, 0x13, 0xd6, 0x7e, 0xd4, + 0x85, 0xe2, 0x38, 0x78, 0x04, 0xe6, 0x6c, 0xab, 0xbb, 0x8d, 0x4d, 0xdc, 0x23, 0x4c, 0x66, 0xec, + 0x59, 0xba, 0xd6, 0x19, 0xf0, 0xc7, 0xaf, 0xc9, 0xf6, 0x9b, 0xc1, 0xc3, 0xc6, 0x5e, 0x1a, 0xc2, + 0x2e, 0x89, 0x19, 0xcd, 0x7c, 0x53, 0x67, 0x51, 0x42, 0x27, 0xf5, 0xd9, 0xd5, 0x7f, 0x76, 0x5e, + 0x2b, 0x92, 0x61, 0x67, 0xfc, 0xf0, 0x9a, 0xf7, 0xb6, 0x57, 0x3b, 0xd3, 0x57, 0xd3, 
0x7f, 0x56, + 0xc0, 0xa5, 0xd4, 0x51, 0xf9, 0x25, 0xbe, 0xae, 0xa5, 0xa4, 0x7e, 0xf9, 0x14, 0x52, 0x7f, 0x1d, + 0xcc, 0x8a, 0x0f, 0xb6, 0x89, 0x9b, 0x42, 0x78, 0x63, 0xdb, 0x90, 0xbb, 0x51, 0x12, 0x9f, 0xf5, + 0xba, 0x57, 0x3d, 0xe5, 0xeb, 0x5e, 0xdc, 0x0b, 0xf1, 0x0f, 0x48, 0x7e, 0xea, 0xa5, 0xbd, 0x10, + 0xff, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, 0xb2, 0x42, 0x38, 0x90, 0x7a, + 0x51, 0x02, 0xfd, 0xb9, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, 0x45, 0xf2, 0xb9, 0xf8, 0xdd, + 0xe4, 0x2f, 0x0a, 0x78, 0x21, 0x77, 0x23, 0xc0, 0x75, 0xa9, 0xec, 0xae, 0x26, 0xca, 0xee, 0x37, + 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0x7e, 0x9a, 0x7b, 0xbb, 0xd8, 0xd3, 0x5c, 0x86, 0x76, 0x1f, + 0xfd, 0x46, 0xd7, 0x5e, 0x7d, 0xf2, 0xb4, 0x31, 0xf6, 0xd1, 0xd3, 0xc6, 0xd8, 0x27, 0x4f, 0x1b, + 0x63, 0x3f, 0x19, 0x36, 0x94, 0x27, 0xc3, 0x86, 0xf2, 0xd1, 0xb0, 0xa1, 0x7c, 0x32, 0x6c, 0x28, + 0x7f, 0x1d, 0x36, 0x94, 0x5f, 0x7e, 0xda, 0x18, 0x7b, 0x7f, 0x42, 0x8c, 0xf8, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x70, 0x2d, 0x26, 0x9d, 0xec, 0x28, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,36 +1107,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +1153,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; 
iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,41 +1200,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -376,41 +1253,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 
0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -418,37 +1306,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -456,51 +1353,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) 
} - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -508,58 +1416,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -567,31 +1482,39 @@ func (m 
*DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -599,41 +1522,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -641,49 +1575,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -691,37 +1638,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -729,69 +1685,80 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -799,52 +1766,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -852,31 +1826,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -884,41 +1866,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -926,41 +1919,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -968,37 +1972,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 
+ } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1006,43 +2019,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1050,44 +2072,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 
0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1095,27 +2124,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1123,37 +2159,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1161,22 +2206,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition 
!= nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1184,41 +2234,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1226,20 +2287,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1247,46 +2313,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := 
len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1294,41 +2368,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1336,41 +2421,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) 
- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1378,37 +2474,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n40, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1416,73 +2521,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n41, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, 
err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n41 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n42, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n42 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1490,57 +2610,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], 
m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1548,37 +2677,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n44 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1590,6 +2732,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1604,6 +2749,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
m.ObjectMeta.Size() @@ -1616,6 +2764,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1632,6 +2783,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1646,6 +2800,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1664,6 +2821,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1687,6 +2847,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1699,6 +2862,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1711,6 +2877,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1729,6 +2898,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1743,6 +2915,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1768,6 +2943,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1789,6 +2967,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1801,6 +2982,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1813,6 +2997,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1829,6 +3016,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1843,6 +3033,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1859,6 +3052,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1876,6 +3072,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1886,6 +3085,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1900,6 +3102,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ 
-1909,6 +3114,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1921,6 +3129,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1928,6 +3139,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1945,6 +3159,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1957,6 +3174,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1973,6 +3193,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1987,6 +3210,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -2017,6 +3243,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2041,6 +3270,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2053,14 +3285,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2070,8 +3295,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -2081,9 +3306,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2093,7 +3323,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2107,7 +3337,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2118,9 +3348,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2130,8 +3365,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2143,6 +3378,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + 
repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -2153,7 +3393,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2164,7 +3404,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2174,7 +3414,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2190,8 +3430,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2200,9 +3440,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) 
+ `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2213,8 +3458,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2228,13 +3473,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2247,7 +3497,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2257,7 +3507,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2271,7 +3521,7 @@ func (this *ReplicaSetCondition) 
String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2282,9 +3532,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2295,8 +3550,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2306,13 +3561,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2322,7 +3582,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", 
"k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2332,8 +3592,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2353,7 +3613,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2397,7 +3657,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2411,7 +3671,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2422,9 +3682,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 
1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2433,11 +3698,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2450,6 +3720,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2459,7 +3734,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2470,7 +3745,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2498,7 +3773,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if 
b < 0x80 { break } @@ -2526,7 +3801,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2535,6 +3810,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2556,7 +3834,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2565,6 +3843,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2586,7 +3867,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2600,6 +3881,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2627,7 +3911,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2655,7 +3939,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2664,6 +3948,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2685,7 +3972,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2694,6 +3981,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2711,6 +4001,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2738,7 +4031,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2766,7 +4059,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2775,6 +4068,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2796,7 +4092,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2805,6 +4101,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2826,7 +4125,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2835,6 +4134,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2851,6 +4153,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2878,7 +4183,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2906,7 +4211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2916,6 +4221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2935,7 +4243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2945,6 +4253,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2964,7 +4275,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2973,6 +4284,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2994,7 +4308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3004,6 +4318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3023,7 +4340,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3033,6 +4350,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3047,6 +4367,9 @@ func (m 
*DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3074,7 +4397,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3102,7 +4425,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3111,6 +4434,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3132,7 +4458,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3141,6 +4467,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3158,6 +4487,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3185,7 +4517,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3213,7 +4545,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3222,11 +4554,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3246,7 +4581,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3255,6 +4590,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3276,7 +4614,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3285,6 +4623,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3306,7 +4647,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3325,7 +4666,7 @@ func (m 
*DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3340,6 +4681,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3367,7 +4711,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3395,7 +4739,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3414,7 +4758,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3433,7 +4777,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3452,7 +4796,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3471,7 +4815,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3490,7 +4834,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3509,7 +4853,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3528,7 +4872,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3547,7 +4891,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3567,7 +4911,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3576,6 +4920,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3593,6 +4940,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3620,7 +4970,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-3648,7 +4998,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3658,6 +5008,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3677,7 +5030,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3686,6 +5039,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3705,6 +5061,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3732,7 +5091,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3760,7 +5119,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3769,6 +5128,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3790,7 +5152,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3799,6 +5161,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3820,7 +5185,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3829,6 +5194,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3845,6 +5213,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3872,7 +5243,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3900,7 +5271,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3910,6 +5281,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3929,7 +5303,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3939,6 +5313,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3958,7 +5335,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3968,6 +5345,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3987,7 +5367,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3997,6 +5377,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4016,7 +5399,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4025,6 +5408,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4046,7 +5432,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4055,6 +5441,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4071,6 +5460,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4098,7 +5490,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4126,7 +5518,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4135,6 +5527,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4156,7 +5551,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4165,6 +5560,9 @@ func (m 
*DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4182,6 +5580,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4209,7 +5610,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4237,7 +5638,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4257,7 +5658,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4266,11 +5667,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4290,7 +5694,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4299,6 +5703,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4320,7 +5727,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4329,6 +5736,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4350,7 +5760,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4369,7 +5779,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4389,7 +5799,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4409,7 +5819,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4424,6 +5834,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4451,7 +5864,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4479,7 +5892,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4498,7 +5911,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4517,7 +5930,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4536,7 +5949,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4555,7 +5968,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4574,7 +5987,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4583,6 +5996,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4605,7 +6021,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4624,7 +6040,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4639,6 +6055,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4666,7 +6085,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4694,7 +6113,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4704,6 +6123,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4723,7 +6145,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4732,6 +6154,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4751,6 +6176,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4778,7 +6206,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4806,7 +6234,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4815,6 +6243,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4836,7 +6267,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4845,6 +6276,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4866,7 +6300,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4875,6 +6309,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4891,6 +6328,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4918,7 +6358,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4946,7 +6386,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4956,6 +6396,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4975,7 +6418,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4985,6 +6428,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5004,7 +6450,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5013,6 +6459,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-5034,7 +6483,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5044,6 +6493,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5063,7 +6515,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5073,6 +6525,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5087,6 +6542,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5114,7 +6572,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5142,7 +6600,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5151,6 +6609,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5172,7 +6633,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5181,6 +6642,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5198,6 +6662,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5225,7 +6692,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5253,7 +6720,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5273,7 +6740,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5282,11 +6749,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := 
m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5306,7 +6776,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5315,6 +6785,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5336,7 +6809,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5350,6 +6823,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5377,7 +6853,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5405,7 +6881,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5424,7 +6900,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5443,7 +6919,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5462,7 +6938,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5481,7 +6957,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5500,7 +6976,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5509,6 +6985,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5526,6 +7005,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5553,7 +7035,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5581,7 +7063,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5590,11 +7072,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5609,6 +7094,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5636,7 +7124,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5664,7 +7152,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5673,11 +7161,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5697,7 +7188,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5706,11 +7197,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5725,6 +7219,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5752,7 +7249,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5780,7 +7277,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5795,6 +7292,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5822,7 +7322,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5850,7 +7350,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5859,6 
+7359,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5880,7 +7383,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5889,6 +7392,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5910,7 +7416,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5919,6 +7425,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5935,6 +7444,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5962,7 +7474,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5990,7 +7502,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6004,6 +7516,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6031,7 +7546,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6059,7 +7574,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6078,7 +7593,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6087,6 +7602,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6107,7 +7625,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6124,7 +7642,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6134,6 +7652,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6150,7 +7671,7 @@ func (m 
*ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6160,6 +7681,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6196,7 +7720,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6206,6 +7730,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6220,6 +7747,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6247,7 +7777,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6275,7 +7805,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6284,6 +7814,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6305,7 +7838,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6314,6 +7847,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6335,7 +7871,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6344,6 +7880,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6360,6 +7899,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6387,7 +7929,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6415,7 +7957,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6425,6 +7967,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6444,7 +7989,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6454,6 +7999,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6473,7 +8021,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6482,6 +8030,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6503,7 +8054,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6513,6 +8064,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6532,7 +8086,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6542,6 +8096,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6556,6 +8113,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6583,7 +8143,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6611,7 +8171,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6620,6 +8180,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6641,7 +8204,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6650,6 +8213,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6667,6 +8233,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -6694,7 +8263,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6722,7 +8291,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6742,7 +8311,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6751,11 +8320,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6775,7 +8347,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6784,6 +8356,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6805,7 +8380,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6814,10 +8389,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6836,7 +8414,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6846,6 +8424,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6865,7 +8446,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6875,6 +8456,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6894,7 +8478,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6903,6 +8487,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6924,7 +8511,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6939,6 +8526,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6966,7 +8556,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6994,7 +8584,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7013,7 +8603,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7032,7 +8622,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7051,7 +8641,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7070,7 +8660,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7089,7 +8679,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7099,6 +8689,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7118,7 +8711,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7128,6 +8721,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7147,7 +8743,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7167,7 +8763,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7176,6 +8772,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7193,6 +8792,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if 
skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7220,7 +8822,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7248,7 +8850,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7258,6 +8860,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7277,7 +8882,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7286,6 +8891,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7305,6 +8913,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7371,10 +8982,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -7403,6 +9017,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -7421,147 +9038,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2176 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0x5a, 0x51, 0x91, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, - 0x1c, 0x25, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, - 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, - 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xfc, 0xb6, 0xab, 0x6a, - 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, - 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0x8d, 0x43, 0x42, 0xf1, 0x5a, 0xab, 0x47, - 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, - 
0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, - 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc8, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x35, - 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x92, 0x63, 0x2d, 0xbd, 0x11, 0x61, 0x0c, 0xdc, - 0x39, 0xd2, 0x4c, 0xe2, 0x0c, 0x5a, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2d, 0x83, 0x50, 0x9c, 0x65, - 0xd5, 0xca, 0xb3, 0x72, 0x3c, 0x93, 0x6a, 0x06, 0x49, 0x19, 0xbc, 0x35, 0xca, 0xc0, 0xed, 0x1c, - 0x11, 0x03, 0xa7, 0xec, 0x5e, 0xcf, 0xb3, 0xf3, 0xa8, 0xa6, 0xb7, 0x34, 0x93, 0xba, 0xd4, 0x49, - 0x1a, 0x35, 0xff, 0xa3, 0x00, 0xb8, 0x61, 0x99, 0xd4, 0xb1, 0x74, 0x9d, 0x38, 0x88, 0xf4, 0x35, - 0x57, 0xb3, 0x4c, 0xf8, 0x00, 0xd4, 0xd8, 0x7c, 0xba, 0x98, 0xe2, 0xba, 0x72, 0x59, 0x59, 0x99, - 0x5a, 0xbb, 0xae, 0x46, 0x2b, 0x1d, 0xd2, 0xab, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2a, 0x43, 0xab, - 0xfd, 0x1b, 0xea, 0xee, 0xe1, 0x87, 0xa4, 0x43, 0xb7, 0x09, 0xc5, 0x6d, 0xf8, 0xf8, 0x64, 0x79, - 0x6c, 0x78, 0xb2, 0x0c, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x5d, 0x50, 0xe1, 0xec, 0x25, 0xce, 0xbe, - 0x9a, 0xcb, 0x2e, 0x26, 0xad, 0x22, 0xfc, 0x93, 0x77, 0x1f, 0x51, 0x62, 0x32, 0xf7, 0xda, 0x2f, - 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x0d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, 0x7c, - 0x59, 0x59, 0x29, 0xb7, 0x2f, 0x08, 0x54, 0x2d, 0x98, 0x16, 0x0a, 0x11, 0xcd, 0xc7, 0x0a, 0x58, - 0x48, 0xcf, 0x7b, 0x4b, 0x73, 0x29, 0xfc, 0x61, 0x6a, 0xee, 0x6a, 0xb1, 0xb9, 0x33, 0x6b, 0x3e, - 0xf3, 0x70, 0xe0, 0xa0, 0x25, 0x36, 0xef, 0x3d, 0x50, 0xd5, 0x28, 0x31, 0xdc, 0x7a, 0xe9, 0x72, - 0x79, 0x65, 0x6a, 0xed, 0xaa, 0x9a, 0x13, 0xc0, 0x6a, 0xda, 0xbb, 0xf6, 0xb4, 0xe0, 0xad, 0xde, - 0x65, 0x0c, 0xc8, 0x27, 0x6a, 0xfe, 0xa2, 0x04, 0x26, 0x37, 0x31, 0x31, 0x2c, 0x73, 0x9f, 0xd0, - 0x73, 0xd8, 0xb9, 0x3b, 0xa0, 0xe2, 0xda, 0xa4, 0x23, 0x76, 0xee, 0x4a, 0xee, 0x04, 0x42, 0x9f, - 0xf6, 0x6d, 0xd2, 0x89, 0xb6, 0x8c, 0x3d, 0x21, 0xce, 0x00, 0xf7, 0xc0, 0xb8, 0x4b, 0x31, 0xf5, - 0x5c, 0xbe, 0x61, 0x53, 0x6b, 0x2b, 0x05, 0xb8, 0x38, 0xbe, 0x3d, 0x23, 0xd8, 0xc6, 0xfd, 0x67, - 0x24, 0x78, 0x9a, 0xff, 0x28, 0x01, 0x18, 0x62, 0x37, 0x2c, 0xb3, 0xab, 0x51, 0x16, 0xce, 0x37, - 0x41, 0x85, 0x0e, 0x6c, 0xc2, 0x17, 0x64, 0xb2, 0x7d, 0x25, 0x70, 0xe5, 0xde, 0xc0, 0x26, 0x9f, - 0x9f, 0x2c, 0x2f, 0xa4, 0x2d, 0x58, 0x0f, 0xe2, 0x36, 0x70, 0x2b, 0x74, 0xb2, 0xc4, 0xad, 0xdf, - 0x90, 0x87, 0xfe, 0xfc, 0x64, 0x39, 0xe3, 0xec, 0x50, 0x43, 0x26, 0xd9, 0x41, 0xd8, 0x07, 0x50, - 0xc7, 0x2e, 0xbd, 0xe7, 0x60, 0xd3, 0xf5, 0x47, 0xd2, 0x0c, 0x22, 0xa6, 0xff, 0x5a, 0xb1, 0x8d, - 0x62, 0x16, 0xed, 0x25, 0xe1, 0x05, 0xdc, 0x4a, 0xb1, 0xa1, 0x8c, 0x11, 0xe0, 0x15, 0x30, 0xee, - 0x10, 0xec, 0x5a, 0x66, 0xbd, 0xc2, 0x67, 0x11, 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x57, - 0xc1, 0x84, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xbd, 0xca, 0x81, 0xb3, 0x02, 0x38, 0xb1, 0xed, 0x37, - 0xa3, 0xa0, 0xbf, 0xf9, 0x7b, 0x05, 0x4c, 0x87, 0x2b, 0x77, 0x0e, 0x99, 0x73, 0x5b, 0xce, 0x9c, - 0xe6, 0xe8, 0x60, 0xc9, 0x49, 0x98, 0x8f, 0xca, 0x31, 0xc7, 0x59, 0x38, 0xc2, 0x1f, 0x81, 0x9a, - 0x4b, 0x74, 0xd2, 0xa1, 0x96, 0x23, 0x1c, 0x7f, 0xbd, 0xa0, 0xe3, 0xf8, 0x90, 0xe8, 0xfb, 0xc2, - 0xb4, 0xfd, 0x02, 0xf3, 0x3c, 0x78, 0x42, 0x21, 0x25, 0x7c, 0x1f, 0xd4, 0x28, 0x31, 0x6c, 0x1d, - 0x53, 0x22, 0xb2, 0xe6, 0xa5, 0xb8, 0xf3, 0x2c, 0x66, 0x18, 0xd9, 0x9e, 0xd5, 0xbd, 0x27, 0x60, - 0x3c, 0x65, 0xc2, 0xc5, 0x08, 0x5a, 0x51, 0x48, 0x03, 0x6d, 0x30, 0xe3, 0xd9, 0x5d, 0x86, 0xa4, - 0xec, 0x38, 0xef, 0x0d, 0x44, 0x0c, 0x5d, 0x1f, 0xbd, 0x2a, 0x07, 0x92, 0x5d, 0x7b, 0x41, 0x8c, - 0x32, 0x23, 0xb7, 0xa3, 
0x04, 0x3f, 0x5c, 0x07, 0xb3, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, - 0xd2, 0xb1, 0xcc, 0xae, 0xcb, 0x43, 0xa9, 0xda, 0x5e, 0x14, 0x04, 0xb3, 0xdb, 0x72, 0x37, 0x4a, - 0xe2, 0xe1, 0x16, 0x98, 0x0f, 0x0e, 0xe0, 0x3b, 0x9a, 0x4b, 0x2d, 0x67, 0xb0, 0xa5, 0x19, 0x1a, - 0xad, 0x8f, 0x73, 0x9e, 0xfa, 0xf0, 0x64, 0x79, 0x1e, 0x65, 0xf4, 0xa3, 0x4c, 0xab, 0xe6, 0x6f, - 0xc6, 0xc1, 0x6c, 0xe2, 0x5c, 0x80, 0xf7, 0xc1, 0x42, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, - 0x38, 0x24, 0xce, 0x7e, 0xe7, 0x88, 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0xe1, - 0xeb, 0xc2, 0x46, 0x26, 0x0a, 0xe5, 0x58, 0xc3, 0xf7, 0x00, 0x34, 0x79, 0xd3, 0xb6, 0xe6, 0xba, - 0x21, 0x67, 0x89, 0x73, 0x86, 0xa9, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x97, 0xb8, - 0x9a, 0x43, 0xba, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x9b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x04, - 0x53, 0xfe, 0x68, 0x7c, 0xcd, 0xc5, 0xe6, 0xcc, 0x09, 0xb2, 0xa9, 0x9d, 0xa8, 0x0b, 0xc5, 0x71, - 0x6c, 0x6a, 0xd6, 0xa1, 0x4b, 0x9c, 0x3e, 0xe9, 0xde, 0xf6, 0xc5, 0x01, 0xab, 0xa0, 0x55, 0x5e, - 0x41, 0xc3, 0xa9, 0xed, 0xa6, 0x10, 0x28, 0xc3, 0x8a, 0x4d, 0xcd, 0x8f, 0x9a, 0xd4, 0xd4, 0xc6, - 0xe5, 0xa9, 0x1d, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xf5, 0x3e, 0xd6, 0x74, - 0x7c, 0xa8, 0x93, 0xfa, 0x84, 0x1c, 0x7b, 0x3b, 0x72, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x70, 0xd1, - 0x6f, 0x3a, 0x30, 0x71, 0x48, 0x52, 0xe3, 0x24, 0x2f, 0x0a, 0x92, 0x8b, 0x3b, 0x49, 0x00, 0x4a, - 0xdb, 0xc0, 0x9b, 0x60, 0xa6, 0x63, 0xe9, 0x3a, 0x8f, 0xc7, 0x0d, 0xcb, 0x33, 0x69, 0x7d, 0x92, - 0xb3, 0x40, 0x96, 0x43, 0x1b, 0x52, 0x0f, 0x4a, 0x20, 0xe1, 0x8f, 0x01, 0xe8, 0x04, 0x85, 0xc1, - 0xad, 0x83, 0x11, 0x0a, 0x20, 0x5d, 0x96, 0xa2, 0xca, 0x1c, 0x36, 0xb9, 0x28, 0x46, 0xd9, 0xfc, - 0x48, 0x01, 0x8b, 0x39, 0x89, 0x0e, 0xbf, 0x2b, 0x15, 0xc1, 0xab, 0x89, 0x22, 0x78, 0x29, 0xc7, - 0x2c, 0x56, 0x09, 0x8f, 0xc0, 0x34, 0x13, 0x24, 0x9a, 0xd9, 0xf3, 0x21, 0xe2, 0x2c, 0x6b, 0xe5, - 0x4e, 0x00, 0xc5, 0xd1, 0xd1, 0xa9, 0x7c, 0x71, 0x78, 0xb2, 0x3c, 0x2d, 0xf5, 0x21, 0x99, 0xb8, - 0xf9, 0xcb, 0x12, 0x00, 0x9b, 0xc4, 0xd6, 0xad, 0x81, 0x41, 0xcc, 0xf3, 0xd0, 0x34, 0x77, 0x25, - 0x4d, 0xf3, 0x4a, 0xfe, 0x96, 0x84, 0x4e, 0xe5, 0x8a, 0x9a, 0xf7, 0x13, 0xa2, 0xe6, 0xd5, 0x22, - 0x64, 0x4f, 0x57, 0x35, 0x9f, 0x94, 0xc1, 0x5c, 0x04, 0x8e, 0x64, 0xcd, 0x2d, 0x69, 0x47, 0x5f, - 0x49, 0xec, 0xe8, 0x62, 0x86, 0xc9, 0x73, 0xd3, 0x35, 0xcf, 0x5e, 0x5f, 0xc0, 0x0f, 0xc1, 0x0c, - 0x13, 0x32, 0x7e, 0x48, 0x70, 0x99, 0x34, 0x7e, 0x6a, 0x99, 0x14, 0x16, 0xb7, 0x2d, 0x89, 0x09, - 0x25, 0x98, 0x73, 0x64, 0xd9, 0xc4, 0xf3, 0x96, 0x65, 0xcd, 0x3f, 0x28, 0x60, 0x26, 0xda, 0xa6, - 0x73, 0x10, 0x51, 0x77, 0x64, 0x11, 0xf5, 0x52, 0x81, 0xe0, 0xcc, 0x51, 0x51, 0x9f, 0x54, 0xe2, - 0xae, 0x73, 0x19, 0xb5, 0xc2, 0x5e, 0xc1, 0x6c, 0x5d, 0xeb, 0x60, 0x57, 0xd4, 0xdb, 0x17, 0xfc, - 0xd7, 0x2f, 0xbf, 0x0d, 0x85, 0xbd, 0x92, 0xe0, 0x2a, 0x3d, 0x5f, 0xc1, 0x55, 0x7e, 0x36, 0x82, - 0xeb, 0x07, 0xa0, 0xe6, 0x06, 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x16, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, - 0xea, 0x50, 0x5f, 0x85, 0x74, 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, - 0x5c, 0xd2, 0xe5, 0x19, 0x50, 0x8b, 0x92, 0x79, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, - 0xed, 0x58, 0x3d, 0x87, 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, - 0x5e, 0x1a, 0x9e, 0x2c, 0x2f, 0xee, 0x65, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, - 0x79, 0x36, 0xe6, 0xc8, 0x14, 0xe5, 0x4c, 0x32, 0xe5, 0x5a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, - 0x2a, 0x48, 0xc5, 0xea, 0x3a, 0x98, 0x15, 0xb2, 
0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x03, - 0xb9, 0x1b, 0x25, 0xf1, 0x4c, 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x3d, 0x09, - 0x40, 0x69, 0x1b, 0xb8, 0x0d, 0xe6, 0x3c, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0x77, - 0x90, 0x86, 0xa0, 0x2c, 0x3b, 0xf8, 0x40, 0xd2, 0x23, 0xe3, 0xfc, 0x48, 0xb8, 0x56, 0x20, 0xac, - 0x0b, 0x0b, 0x12, 0x78, 0x0b, 0x4c, 0x3b, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, - 0x6c, 0x1a, 0xc5, 0x3b, 0x91, 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, - 0xc0, 0x74, 0x1e, 0x8e, 0xbc, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0xae, 0x17, - 0xd4, 0x3f, 0xd1, 0x81, 0x5a, 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9f, 0x4b, 0x9d, 0xa2, 0x02, 0x28, - 0x72, 0xea, 0x19, 0x08, 0xa0, 0x18, 0xd9, 0xd3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x2e, 0x02, 0x17, - 0x16, 0x40, 0x19, 0x26, 0x5f, 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, - 0x27, 0x51, 0x12, 0x79, 0x95, 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, - 0xfc, 0x4e, 0xa6, 0xf9, 0x97, 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, - 0x1e, 0x98, 0x7f, 0xe8, 0xe9, 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, - 0x72, 0xfe, 0xfb, 0x19, 0x18, 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0x67, 0x2a, 0xf6, 0xa9, 0x0a, - 0x54, 0x39, 0x45, 0x05, 0xca, 0x2c, 0xdc, 0xd5, 0x33, 0x14, 0xee, 0xd3, 0x55, 0xda, 0x8c, 0x83, - 0x6b, 0xe4, 0xab, 0xff, 0xcf, 0x15, 0xb0, 0x90, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x63, 0xe0, 0x47, - 0xf1, 0x8b, 0x8f, 0x51, 0x45, 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0xbb, 0x26, 0xdd, - 0x75, 0xf6, 0xa9, 0xa3, 0x99, 0x3d, 0xbf, 0xf2, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, - 0x01, 0x8b, 0x39, 0x95, 0xef, 0x7c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xed, 0x7b, 0x4e, - 0x2f, 0xab, 0x56, 0x17, 0x1b, 0x87, 0x67, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0x5c, - 0x96, 0x26, 0xc9, 0x32, 0x87, 0x3c, 0xf4, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x05, 0x93, 0x36, - 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, - 0xbf, 0xf9, 0x5f, 0x05, 0x54, 0xf7, 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, - 0x7f, 0x93, 0xce, 0xfd, 0xc9, 0x2d, 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xf2, 0x08, 0x9e, 0xa7, 0xd7, - 0xf8, 0x77, 0xc0, 0x64, 0x38, 0xdc, 0xe9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x2a, 0x36, 0xc4, - 0x29, 0x8f, 0xaf, 0x07, 0xd2, 0xb1, 0xcf, 0x12, 0x73, 0xad, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, - 0xd7, 0xa4, 0x4e, 0xfc, 0x05, 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0xcc, 0x50, 0xec, 0xf4, 0x08, 0x0d, - 0xfa, 0xf8, 0x82, 0x4d, 0x46, 0xb7, 0x13, 0xf7, 0xa4, 0x5e, 0x94, 0x40, 0x2f, 0xdd, 0x02, 0xd3, - 0xd2, 0x60, 0xf0, 0x02, 0x28, 0x1f, 0x93, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x41, 0xb5, - 0x8f, 0x75, 0xcf, 0x8f, 0xf3, 0x49, 0xe4, 0x3f, 0xdc, 0x2c, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, - 0x27, 0x0a, 0xce, 0x73, 0x88, 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, - 0x18, 0x43, 0x89, 0x18, 0x7b, 0xad, 0x10, 0xdb, 0xd3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x1f, 0x43, - 0x47, 0x72, 0xf2, 0xdb, 0x92, 0x9c, 0x5c, 0x49, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, - 0xb4, 0x9e, 0xfc, 0xa3, 0x02, 0x66, 0x63, 0x6b, 0x77, 0x0e, 0x82, 0xf2, 0xae, 0x2c, 0x28, 0x5f, - 0x2e, 0x12, 0x34, 0x39, 0x8a, 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, - 0xef, 0x5b, 0xba, 0x67, 0x90, 0x0d, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, - 0x0b, 0xe9, 0x89, 0xe3, 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x1f, 0x59, 0x46, 
0xba, 0xef, 0x7e, 0x06, - 0x1d, 0xca, 0x1c, 0x04, 0xbe, 0x09, 0xa6, 0x98, 0x7e, 0xd3, 0x3a, 0x64, 0x07, 0x1b, 0x41, 0x60, - 0x85, 0x9f, 0xb0, 0xf6, 0xa3, 0x2e, 0x14, 0xc7, 0xc1, 0x23, 0x30, 0x67, 0x5b, 0xdd, 0x6d, 0x6c, - 0xe2, 0x1e, 0x61, 0x32, 0x63, 0xcf, 0xd2, 0xb5, 0xce, 0x80, 0x5f, 0x7e, 0x4d, 0xb6, 0xdf, 0x0a, - 0x6e, 0x45, 0xf6, 0xd2, 0x10, 0xf6, 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, - 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x5a, 0x91, 0x08, 0x3b, 0xe3, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, - 0x99, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, - 0xcb, 0xa7, 0x90, 0xe7, 0xeb, 0x60, 0x56, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x43, - 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, 0x5e, 0xf5, 0x94, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, - 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, - 0xb2, 0x42, 0x38, 0x90, 0x7a, 0x51, 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, - 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, 0xdf, 0x14, 0xf0, 0x62, 0x6e, 0x22, 0xc0, 0x75, 0xa9, 0xec, - 0xae, 0x26, 0xca, 0xee, 0xb7, 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, - 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, 0x47, 0xd7, 0x5e, 0x7d, 0xfc, 0xa4, 0x31, 0xf6, 0xf1, 0x93, - 0xc6, 0xd8, 0xa7, 0x4f, 0x1a, 0x63, 0x3f, 0x1b, 0x36, 0x94, 0xc7, 0xc3, 0x86, 0xf2, 0xf1, 0xb0, - 0xa1, 0x7c, 0x3a, 0x6c, 0x28, 0x7f, 0x1f, 0x36, 0x94, 0x5f, 0x7f, 0xd6, 0x18, 0xfb, 0x60, 0x42, - 0x8c, 0xf8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x85, 0x43, 0x0a, 0xec, 0x28, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 39e07e27..d358455f 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -141,13 +141,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. 
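The bulk of the regenerated unmarshalers above apply one recurring hardening pattern: after a varint length is decoded, the code now also rejects a postIndex that wrapped around to a negative value, so a corrupted length can no longer yield a negative slice offset. Below is a minimal standalone sketch of that pattern, assuming sentinel errors analogous to ErrInvalidLengthGenerated/ErrIntOverflowGenerated in the generated code; the helper name readLengthDelimited is illustrative only and is not part of the vendored API.

package main

import (
	"errors"
	"fmt"
	"io"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
)

// readLengthDelimited decodes a varint length starting at iNdEx and returns
// the payload bounds, applying the same guards the regenerated code adds:
// the decoded length must be non-negative, and iNdEx+length must neither
// overflow int nor run past the end of the buffer.
func readLengthDelimited(dAtA []byte, iNdEx int) (start, end int, err error) {
	l := len(dAtA)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if iNdEx >= l {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return 0, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // the guard added throughout this diff: iNdEx+msglen overflowed
		return 0, 0, errInvalidLength
	}
	if postIndex > l {
		return 0, 0, io.ErrUnexpectedEOF
	}
	return iNdEx, postIndex, nil
}

func main() {
	buf := []byte{0x03, 'a', 'b', 'c'} // varint length 3, then three payload bytes
	s, e, err := readLengthDelimited(buf, 0)
	fmt.Println(s, e, err, string(buf[s:e])) // 1 4 <nil> abc
}

The remaining hunks in this file follow the same recipe, and the neighboring changes (the typed StatefulSetUpdateStrategyType constants in types.go above, and ListMeta switching from a value copy to DeepCopyInto in zz_generated.deepcopy.go below) are likewise mechanical fallout of regenerating the vendored code against the newer Kubernetes libraries this patch pulls in.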
diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 8a0bad22..127bf095 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -720,7 +720,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go index 399d92b3..003cc30b 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -17,32 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto - - It has these top-level messages: - AuditSink - AuditSinkList - AuditSinkSpec - Policy - ServiceReference - Webhook - WebhookClientConfig - WebhookThrottleConfig -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -55,37 +43,229 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AuditSink) Reset() { *m = AuditSink{} } -func (*AuditSink) ProtoMessage() {} -func (*AuditSink) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AuditSink) Reset() { *m = AuditSink{} } +func (*AuditSink) ProtoMessage() {} +func (*AuditSink) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{0} +} +func (m *AuditSink) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSink) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSink.Merge(m, src) +} +func (m *AuditSink) XXX_Size() int { + return m.Size() +} +func (m *AuditSink) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSink.DiscardUnknown(m) +} -func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } -func (*AuditSinkList) ProtoMessage() {} -func (*AuditSinkList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AuditSink proto.InternalMessageInfo -func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } -func (*AuditSinkSpec) ProtoMessage() {} -func (*AuditSinkSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } +func (*AuditSinkList) ProtoMessage() {} +func (*AuditSinkList) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{1} +} +func (m *AuditSinkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkList.Merge(m, src) +} +func (m *AuditSinkList) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkList) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkList.DiscardUnknown(m) +} -func (m *Policy) Reset() { *m = Policy{} } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_AuditSinkList proto.InternalMessageInfo -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } +func (*AuditSinkSpec) ProtoMessage() {} +func (*AuditSinkSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{2} +} +func (m *AuditSinkSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkSpec.Merge(m, src) +} +func (m *AuditSinkSpec) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkSpec.DiscardUnknown(m) +} -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{5} } +var xxx_messageInfo_AuditSinkSpec proto.InternalMessageInfo -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{3} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} -func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } -func (*WebhookThrottleConfig) ProtoMessage() {} -func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{4} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{5} +} +func (m *Webhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Webhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Webhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_Webhook.Merge(m, src) +} +func (m *Webhook) XXX_Size() int { + return m.Size() +} +func (m *Webhook) XXX_DiscardUnknown() { + xxx_messageInfo_Webhook.DiscardUnknown(m) +} + +var xxx_messageInfo_Webhook proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{6} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m 
*WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } +func (*WebhookThrottleConfig) ProtoMessage() {} +func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{7} +} +func (m *WebhookThrottleConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookThrottleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookThrottleConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookThrottleConfig.Merge(m, src) +} +func (m *WebhookThrottleConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookThrottleConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookThrottleConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookThrottleConfig proto.InternalMessageInfo func init() { proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") @@ -97,10 +277,67 @@ func init() { proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptor_642d3597c6afa8ba) +} + +var fileDescriptor_642d3597c6afa8ba = []byte{ + // 765 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, + 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, + 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, + 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, + 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, + 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, + 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, + 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, + 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, + 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, + 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, + 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, + 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, + 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, + 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, + 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, + 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, + 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, + 0x29, 
0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, + 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, + 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, + 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, + 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, + 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, + 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, + 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, + 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, + 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 0xf9, 0x83, + 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, + 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, + 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, + 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, + 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, + 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, + 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, + 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, + 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, + 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, + 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, + 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, + 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, + 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, + 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, + 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, + 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, + 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, + 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, +} + func (m *AuditSink) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -108,33 +345,42 @@ func (m *AuditSink) Marshal() (dAtA []byte, err error) { } func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, 
err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -142,37 +388,46 @@ func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { } func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -180,33 +435,42 @@ func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { } func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Policy.Size())) - n4, err := m.Policy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Webhook.Size())) - n5, err := m.Webhook.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Policy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
if err != nil { return nil, err } @@ -214,36 +478,36 @@ func (m *Policy) Marshal() (dAtA []byte, err error) { } func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) if len(m.Stages) > 0 { - for _, s := range m.Stages { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Stages[iNdEx]) + copy(dAtA[i:], m.Stages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stages[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -251,31 +515,44 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } if m.Path != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Webhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -283,35 +560,44 @@ func (m *Webhook) Marshal() (dAtA []byte, err error) { } func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Webhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Throttle != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Throttle.Size())) - n6, err := m.Throttle.MarshalTo(dAtA[i:]) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n6 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Throttle != nil { + { + size, err := 
m.Throttle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n7 - return i, nil + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -319,39 +605,48 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x1a } if m.Service != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x12 } - if m.CABundle != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -359,33 +654,43 @@ func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookThrottleConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.QPS != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) - } if m.Burst != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) + i-- + dAtA[i] = 0x10 } - return i, nil + if m.QPS != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AuditSink) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -396,6 +701,9 @@ func (m *AuditSink) Size() (n int) { } func (m *AuditSinkList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -410,6 +718,9 @@ func (m *AuditSinkList) Size() (n int) { } func (m *AuditSinkSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Policy.Size() @@ -420,6 +731,9 @@ func (m *AuditSinkSpec) Size() (n int) { } func (m *Policy) Size() 
(n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Level) @@ -434,6 +748,9 @@ func (m *Policy) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -444,10 +761,16 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } return n } func (m *Webhook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Throttle != nil { @@ -460,6 +783,9 @@ func (m *Webhook) Size() (n int) { } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.URL != nil { @@ -478,6 +804,9 @@ func (m *WebhookClientConfig) Size() (n int) { } func (m *WebhookThrottleConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.QPS != nil { @@ -490,14 +819,7 @@ func (m *WebhookThrottleConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -507,7 +829,7 @@ func (this *AuditSink) String() string { return "nil" } s := strings.Join([]string{`&AuditSink{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -517,9 +839,14 @@ func (this *AuditSinkList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]AuditSink{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AuditSink", "AuditSink", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&AuditSinkList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "AuditSink", "AuditSink", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -554,6 +881,7 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, `}`, }, "") return s @@ -563,7 +891,7 @@ func (this *Webhook) String() string { return "nil" } s := strings.Join([]string{`&Webhook{`, - `Throttle:` + strings.Replace(fmt.Sprintf("%v", this.Throttle), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, + `Throttle:` + strings.Replace(this.Throttle.String(), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -575,7 +903,7 @@ func (this *WebhookClientConfig) String() string { } s := 
strings.Join([]string{`&WebhookClientConfig{`, `URL:` + valueToStringGenerated(this.URL) + `,`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `}`, }, "") @@ -615,7 +943,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -643,7 +971,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -652,6 +980,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -673,7 +1004,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -682,6 +1013,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -698,6 +1032,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -725,7 +1062,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -753,7 +1090,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -762,6 +1099,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -783,7 +1123,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -792,6 +1132,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -809,6 +1152,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -836,7 +1182,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -864,7 +1210,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -873,6 +1219,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -894,7 +1243,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -903,6 +1252,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -919,6 +1271,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -946,7 +1301,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -974,7 +1329,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -984,6 +1339,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1003,7 +1361,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1013,6 +1371,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1027,6 +1388,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1054,7 +1418,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1082,7 +1446,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1092,6 +1456,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1111,7 +1478,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1121,6 +1488,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1140,7 +1510,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1150,12 +1520,35 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.Path = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1165,6 +1558,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1192,7 +1588,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1220,7 +1616,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1229,6 +1625,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1253,7 +1652,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1262,6 +1661,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1278,6 +1680,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1305,7 +1710,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1333,7 +1738,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1343,6 +1748,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1363,7 +1771,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1372,6 +1780,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1396,7 +1807,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift 
if b < 0x80 { break } @@ -1405,6 +1816,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1422,6 +1836,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1449,7 +1866,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1477,7 +1894,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1497,7 +1914,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1512,6 +1929,9 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1578,10 +1998,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1610,6 +2033,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1628,58 +2054,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 747 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0xd3, 0x48, - 0x14, 0x8e, 0x9b, 0xa4, 0x49, 0xa6, 0xe9, 0x6e, 0x77, 0xba, 0xbb, 0xca, 0x56, 0x2b, 0xa7, 0xb2, - 0xb4, 0x52, 0xa5, 0xdd, 0x8e, 0xb7, 0xa8, 0x02, 0x84, 0xb8, 0xd4, 0x3d, 0x21, 0x95, 0x52, 0x26, - 0x14, 0x04, 0x42, 0x88, 0x89, 0xf3, 0x62, 0x0f, 0x49, 0x6c, 0x63, 0x8f, 0x83, 0x7a, 0x43, 0xe2, - 0x0f, 0xf0, 0x7b, 0xb8, 0x21, 0x81, 0xd4, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0x1c, 0xf8, 0x0f, 0x9c, - 0xd0, 0x8c, 0xc7, 0x49, 0x68, 0x8a, 0x48, 0x6f, 0x33, 0xdf, 0xbc, 0xef, 0x7b, 0xdf, 0xf7, 0xde, - 0xa0, 0xfd, 0xde, 0xcd, 0x84, 0xf0, 0xd0, 0xee, 0xa5, 0x6d, 0x88, 0x03, 0x10, 0x90, 0xd8, 0x43, - 0x08, 0x3a, 0x61, 0x6c, 0xeb, 0x07, 0x16, 0x71, 0x9b, 0xa5, 0x1d, 0x2e, 0x62, 0xf0, 0x78, 0x22, - 0x62, 0x26, 0x78, 0x18, 0xd8, 0xc3, 0x2d, 0xd6, 0x8f, 0x7c, 0xb6, 0x65, 0x7b, 0x10, 0x40, 0xcc, - 0x04, 0x74, 0x48, 0x14, 0x87, 0x22, 0xc4, 0xff, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0x66, 0x68, 0x24, - 0xa7, 0xad, 0x6d, 0x7a, 0x5c, 0xf8, 0x69, 0x9b, 0xb8, 0xe1, 0xc0, 0xf6, 0x42, 0x2f, 0xb4, 0x15, - 0xbb, 0x9d, 0x76, 0xd5, 0x4d, 0x5d, 0xd4, 0x29, 0x53, 0x5d, 0xdb, 0x9e, 0x98, 0x19, 0x30, 0xd7, - 0xe7, 0x01, 0xc4, 0x47, 0x76, 0xd4, 0xf3, 0x24, 0x90, 0xd8, 0x03, 0x10, 0xcc, 0x1e, 0xce, 
0x78, - 0x59, 0xb3, 0x7f, 0xc4, 0x8a, 0xd3, 0x40, 0xf0, 0x01, 0xcc, 0x10, 0xae, 0xff, 0x8c, 0x90, 0xb8, - 0x3e, 0x0c, 0xd8, 0x45, 0x9e, 0xf5, 0xd1, 0x40, 0xb5, 0x1d, 0x19, 0xb6, 0xc5, 0x83, 0x1e, 0x7e, - 0x8e, 0xaa, 0xd2, 0x51, 0x87, 0x09, 0xd6, 0x30, 0xd6, 0x8d, 0x8d, 0xa5, 0x6b, 0xff, 0x93, 0xc9, - 0x54, 0xc6, 0xc2, 0x24, 0xea, 0x79, 0x12, 0x48, 0x88, 0xac, 0x26, 0xc3, 0x2d, 0x72, 0xaf, 0xfd, - 0x02, 0x5c, 0x71, 0x17, 0x04, 0x73, 0xf0, 0xf1, 0x59, 0xb3, 0x30, 0x3a, 0x6b, 0xa2, 0x09, 0x46, - 0xc7, 0xaa, 0xf8, 0x21, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x05, 0xa5, 0xbe, 0x4d, 0xe6, 0x9a, 0x39, - 0x19, 0x3b, 0x6c, 0x45, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x6f, 0x54, 0xe9, 0x59, 0x1f, 0x0c, - 0xb4, 0x3c, 0xae, 0xda, 0xe3, 0x89, 0xc0, 0x4f, 0x67, 0xb2, 0x90, 0xf9, 0xb2, 0x48, 0xb6, 0x4a, - 0xb2, 0xa2, 0xfb, 0x54, 0x73, 0x64, 0x2a, 0xc7, 0x21, 0x2a, 0x73, 0x01, 0x83, 0xa4, 0xb1, 0xb0, - 0x5e, 0xbc, 0x30, 0xa6, 0xb9, 0x82, 0x38, 0xcb, 0x5a, 0xbc, 0x7c, 0x47, 0xca, 0xd0, 0x4c, 0xcd, - 0x7a, 0x3f, 0x1d, 0x43, 0xc6, 0xc3, 0x87, 0x68, 0x31, 0x0a, 0xfb, 0xdc, 0x3d, 0xd2, 0x21, 0x36, - 0xe7, 0xec, 0x74, 0xa0, 0x48, 0xce, 0x2f, 0xba, 0xcd, 0x62, 0x76, 0xa7, 0x5a, 0x0c, 0x3f, 0x46, - 0x95, 0x57, 0xd0, 0xf6, 0xc3, 0xb0, 0xa7, 0x57, 0x41, 0xe6, 0xd4, 0x7d, 0x94, 0xb1, 0x9c, 0x5f, - 0xb5, 0x70, 0x45, 0x03, 0x34, 0xd7, 0xb3, 0x5c, 0xa4, 0x9b, 0xe1, 0xff, 0x50, 0xb9, 0x0f, 0x43, - 0xe8, 0x2b, 0xeb, 0x35, 0xe7, 0xcf, 0x3c, 0xf2, 0x9e, 0x04, 0xbf, 0xe6, 0x07, 0x9a, 0x15, 0xe1, - 0x7f, 0xd1, 0x62, 0x22, 0x98, 0x07, 0xd9, 0x4c, 0x6b, 0xce, 0xaa, 0xb4, 0xdd, 0x52, 0x88, 0xac, - 0x55, 0x27, 0xaa, 0x4b, 0xac, 0x37, 0x06, 0x5a, 0x69, 0x41, 0x3c, 0xe4, 0x2e, 0x50, 0xe8, 0x42, - 0x0c, 0x81, 0x0b, 0xd8, 0x46, 0xb5, 0x80, 0x0d, 0x20, 0x89, 0x98, 0x0b, 0xba, 0xe7, 0x6f, 0xba, - 0x67, 0x6d, 0x3f, 0x7f, 0xa0, 0x93, 0x1a, 0xbc, 0x8e, 0x4a, 0xf2, 0xa2, 0x46, 0x50, 0x9b, 0xfc, - 0x2b, 0x59, 0x4b, 0xd5, 0x0b, 0xfe, 0x1b, 0x95, 0x22, 0x26, 0xfc, 0x46, 0x51, 0x55, 0x54, 0xe5, - 0xeb, 0x01, 0x13, 0x3e, 0x55, 0xa8, 0xf5, 0xc5, 0x40, 0x79, 0x7e, 0xdc, 0x45, 0x55, 0xe1, 0xc7, - 0xa1, 0x10, 0x7d, 0xd0, 0xab, 0xba, 0x7d, 0xb5, 0x91, 0x3e, 0xd0, 0xec, 0xdd, 0x30, 0xe8, 0x72, - 0xcf, 0xa9, 0xcb, 0x9f, 0x97, 0x63, 0x74, 0xac, 0x8d, 0x05, 0xaa, 0xbb, 0x7d, 0x0e, 0x81, 0xc8, - 0xea, 0xf4, 0xfa, 0x6e, 0x5d, 0xad, 0xd7, 0xee, 0x94, 0x82, 0xf3, 0xbb, 0xce, 0x5d, 0x9f, 0x46, - 0xe9, 0x77, 0x5d, 0xac, 0x77, 0x06, 0x5a, 0xbd, 0x84, 0x8b, 0xff, 0x42, 0xc5, 0x34, 0xce, 0x17, - 0x5c, 0x19, 0x9d, 0x35, 0x8b, 0x87, 0x74, 0x8f, 0x4a, 0x0c, 0x3f, 0x43, 0x95, 0x24, 0xdb, 0x90, - 0xf6, 0x78, 0x63, 0x4e, 0x8f, 0x17, 0xf7, 0xea, 0x2c, 0xc9, 0x7f, 0x96, 0xa3, 0xb9, 0x28, 0xde, - 0x40, 0x55, 0x97, 0x39, 0x69, 0xd0, 0xe9, 0x83, 0x5a, 0x4f, 0x3d, 0x1b, 0xd9, 0xee, 0x4e, 0x86, - 0xd1, 0xf1, 0xab, 0xd5, 0x42, 0x7f, 0x5c, 0x3a, 0x63, 0xe9, 0xfe, 0x65, 0x94, 0x28, 0xf7, 0xc5, - 0xcc, 0xfd, 0xfd, 0x83, 0x16, 0x95, 0x18, 0x6e, 0xa2, 0x72, 0x3b, 0x8d, 0x13, 0xa1, 0xbc, 0x17, - 0x9d, 0x9a, 0xfc, 0xb7, 0x8e, 0x04, 0x68, 0x86, 0x3b, 0xe4, 0xf8, 0xdc, 0x2c, 0x9c, 0x9c, 0x9b, - 0x85, 0xd3, 0x73, 0xb3, 0xf0, 0x7a, 0x64, 0x1a, 0xc7, 0x23, 0xd3, 0x38, 0x19, 0x99, 0xc6, 0xe9, - 0xc8, 0x34, 0x3e, 0x8d, 0x4c, 0xe3, 0xed, 0x67, 0xb3, 0xf0, 0xa4, 0x9a, 0xa7, 0xfa, 0x16, 0x00, - 0x00, 0xff, 0xff, 0x55, 0x1b, 0x03, 0x56, 0xaf, 0x06, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto index 70801a6c..674debee 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto +++ 
b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto @@ -83,6 +83,12 @@ message ServiceReference { // this service. // +optional optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; } // Webhook holds the configuration of the webhook @@ -132,8 +138,6 @@ message WebhookClientConfig { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional optional ServiceReference service = 2; diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go index af31cfe2..a0fb48c3 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go @@ -166,8 +166,6 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` @@ -191,4 +189,10 @@ type ServiceReference struct { // this service. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go index edd608f3..1a86f4da 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go @@ -70,6 +70,7 @@ var map_ServiceReference = map[string]string{ "namespace": "`namespace` is the namespace of the service. Required", "name": "`name` is the name of the service. Required", "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", } func (ServiceReference) SwaggerDoc() map[string]string { @@ -89,7 +90,7 @@ func (Webhook) SwaggerDoc() map[string]string { var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a connection with the webhook", "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. 
Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", } diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go index e71deffa..621a19e8 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *AuditSink) DeepCopyObject() runtime.Object { func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]AuditSink, len(*in)) @@ -131,6 +131,11 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 4e7f28d8..02be20de 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -17,37 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto - - It has these top-level messages: - BoundObjectReference - ExtraValue - TokenRequest - TokenRequestSpec - TokenRequestStatus - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -60,41 +46,257 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } -func (*BoundObjectReference) ProtoMessage() {} -func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{0} +} +func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BoundObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoundObjectReference.Merge(m, src) +} +func (m *BoundObjectReference) XXX_Size() int { + return m.Size() +} +func (m *BoundObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_BoundObjectReference.DiscardUnknown(m) +} -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo -func (m *TokenRequest) Reset() { *m = TokenRequest{} } -func (*TokenRequest) ProtoMessage() {} -func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{1} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} -func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } -func (*TokenRequestSpec) ProtoMessage() {} -func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } -func (*TokenRequestStatus) ProtoMessage() {} -func (*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} 
+func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestSpec.Merge(m, src) +} +func (m *TokenRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m) +} -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo + +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func (*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{4} +} +func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestStatus.Merge(m, src) +} +func (m *TokenRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{5} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{6} +} +func (m 
*TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{7} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{8} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") @@ -106,11 +308,78 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) +} + +var fileDescriptor_2953ea822e7ffe1e = []byte{ + // 903 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x20, 0x2e, 0x5e, 0x09, + 0x55, 0xc0, 0xda, 0x9b, 0x08, 0xc1, 0x6a, 0x91, 0x90, 0x6a, 0x1a, 0x41, 0x84, 0x60, 0x57, 0xb3, + 0xdb, 0x82, 0x38, 0x31, 0xb1, 0x5f, 0x53, 
0x13, 0x3c, 0x36, 0xf6, 0x38, 0x6c, 0x6e, 0xfb, 0x27, + 0x70, 0x04, 0x89, 0x03, 0x7f, 0x04, 0x12, 0xff, 0x42, 0x8f, 0x2b, 0x4e, 0x3d, 0xa0, 0x88, 0x9a, + 0x2b, 0x47, 0x4e, 0x9c, 0xd0, 0x8c, 0xa7, 0x71, 0x9c, 0xb4, 0x69, 0x4e, 0x7b, 0x8b, 0xdf, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0xe6, 0xcb, 0x0c, 0xea, 0x8d, 0x1e, 0xc4, 0xa6, 0x17, 0x58, 0xa3, 0x64, + 0x00, 0x11, 0x03, 0x0e, 0xb1, 0x35, 0x06, 0xe6, 0x06, 0x91, 0xa5, 0x12, 0x34, 0xf4, 0x2c, 0x9a, + 0xf0, 0x53, 0x60, 0xdc, 0x73, 0x28, 0xf7, 0x02, 0x66, 0x8d, 0x3b, 0xd6, 0x10, 0x18, 0x44, 0x94, + 0x83, 0x6b, 0x86, 0x51, 0xc0, 0x03, 0xfc, 0x7a, 0x86, 0x36, 0x69, 0xe8, 0x99, 0x45, 0xb4, 0x39, + 0xee, 0xec, 0xde, 0x1b, 0x7a, 0xfc, 0x34, 0x19, 0x98, 0x4e, 0xe0, 0x5b, 0xc3, 0x60, 0x18, 0x58, + 0xb2, 0x68, 0x90, 0x9c, 0xc8, 0x2f, 0xf9, 0x21, 0x7f, 0x65, 0x64, 0xbb, 0xef, 0xe5, 0xad, 0x7d, + 0xea, 0x9c, 0x7a, 0x0c, 0xa2, 0x89, 0x15, 0x8e, 0x86, 0x22, 0x10, 0x5b, 0x3e, 0x70, 0x7a, 0xc5, + 0x08, 0xbb, 0xd6, 0x75, 0x55, 0x51, 0xc2, 0xb8, 0xe7, 0xc3, 0x52, 0xc1, 0xfb, 0x37, 0x15, 0xc4, + 0xce, 0x29, 0xf8, 0x74, 0xb1, 0xce, 0xf8, 0x43, 0x43, 0xaf, 0xda, 0x41, 0xc2, 0xdc, 0x47, 0x83, + 0x6f, 0xc1, 0xe1, 0x04, 0x4e, 0x20, 0x02, 0xe6, 0x00, 0xde, 0x43, 0xd5, 0x91, 0xc7, 0xdc, 0x96, + 0xb6, 0xa7, 0xed, 0x37, 0xec, 0x5b, 0x67, 0x53, 0xbd, 0x94, 0x4e, 0xf5, 0xea, 0x67, 0x1e, 0x73, + 0x89, 0xcc, 0xe0, 0x2e, 0x42, 0xf4, 0x71, 0xff, 0x18, 0xa2, 0xd8, 0x0b, 0x58, 0xab, 0x2c, 0x71, + 0x58, 0xe1, 0xd0, 0xc1, 0x2c, 0x43, 0xe6, 0x50, 0x82, 0x95, 0x51, 0x1f, 0x5a, 0x95, 0x22, 0xeb, + 0x17, 0xd4, 0x07, 0x22, 0x33, 0xd8, 0x46, 0x95, 0xa4, 0x7f, 0xd8, 0xaa, 0x4a, 0xc0, 0x7d, 0x05, + 0xa8, 0x1c, 0xf5, 0x0f, 0xff, 0x9b, 0xea, 0x6f, 0x5e, 0xb7, 0x24, 0x9f, 0x84, 0x10, 0x9b, 0x47, + 0xfd, 0x43, 0x22, 0x8a, 0x8d, 0x0f, 0x10, 0xea, 0x3d, 0xe3, 0x11, 0x3d, 0xa6, 0xdf, 0x25, 0x80, + 0x75, 0x54, 0xf3, 0x38, 0xf8, 0x71, 0x4b, 0xdb, 0xab, 0xec, 0x37, 0xec, 0x46, 0x3a, 0xd5, 0x6b, + 0x7d, 0x11, 0x20, 0x59, 0xfc, 0x61, 0xfd, 0xa7, 0x5f, 0xf5, 0xd2, 0xf3, 0x3f, 0xf7, 0x4a, 0xc6, + 0x2f, 0x65, 0x74, 0xeb, 0x69, 0x30, 0x02, 0x46, 0xe0, 0xfb, 0x04, 0x62, 0x8e, 0xbf, 0x41, 0x75, + 0x71, 0x44, 0x2e, 0xe5, 0x54, 0x2a, 0xd1, 0xec, 0xde, 0x37, 0x73, 0x77, 0xcc, 0x86, 0x30, 0xc3, + 0xd1, 0x50, 0x04, 0x62, 0x53, 0xa0, 0xcd, 0x71, 0xc7, 0xcc, 0xe4, 0xfc, 0x1c, 0x38, 0xcd, 0x35, + 0xc9, 0x63, 0x64, 0xc6, 0x8a, 0x1f, 0xa3, 0x6a, 0x1c, 0x82, 0x23, 0xf5, 0x6b, 0x76, 0x4d, 0x73, + 0x95, 0xf7, 0xcc, 0xf9, 0xd9, 0x9e, 0x84, 0xe0, 0xe4, 0x0a, 0x8a, 0x2f, 0x22, 0x99, 0xf0, 0x57, + 0x68, 0x23, 0xe6, 0x94, 0x27, 0xb1, 0x54, 0xb9, 0x38, 0xf1, 0x4d, 0x9c, 0xb2, 0xce, 0xde, 0x52, + 0xac, 0x1b, 0xd9, 0x37, 0x51, 0x7c, 0xc6, 0xbf, 0x1a, 0xda, 0x5e, 0x1c, 0x01, 0xbf, 0x83, 0x1a, + 0x34, 0x71, 0x3d, 0x61, 0x9a, 0x4b, 0x89, 0x37, 0xd3, 0xa9, 0xde, 0x38, 0xb8, 0x0c, 0x92, 0x3c, + 0x8f, 0x3f, 0x46, 0x3b, 0xf0, 0x2c, 0xf4, 0x22, 0xd9, 0xfd, 0x09, 0x38, 0x01, 0x73, 0x63, 0x79, + 0xd6, 0x15, 0xfb, 0x4e, 0x3a, 0xd5, 0x77, 0x7a, 0x8b, 0x49, 0xb2, 0x8c, 0xc7, 0x0c, 0x6d, 0x0d, + 0x0a, 0x96, 0x55, 0x8b, 0x76, 0x57, 0x2f, 0x7a, 0x95, 0xcd, 0x6d, 0x9c, 0x4e, 0xf5, 0xad, 0x62, + 0x86, 0x2c, 0xb0, 0x1b, 0xbf, 0x69, 0x08, 0x2f, 0xab, 0x84, 0xef, 0xa2, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x54, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x13, 0x74, 0x3b, 0x5f, 0xe0, 0xa9, 0xe7, + 0x43, 0xcc, 0xa9, 0x1f, 0xaa, 0xd3, 0x7e, 0x7b, 0x3d, 0x2f, 0x89, 0x32, 0xfb, 0x35, 0x45, 0x7f, + 0xbb, 0xb7, 0x4c, 0x47, 0xae, 0xea, 0x61, 0xfc, 0x5c, 0x46, 0x4d, 0x35, 0xf6, 0xd8, 0x83, 0x1f, + 0x5e, 0x82, 0x97, 0x1f, 0x15, 0xbc, 0x7c, 0x6f, 0x2d, 0xdf, 0x89, 
0xd1, 0xae, 0xb5, 0xf2, 0x97, + 0x0b, 0x56, 0xb6, 0xd6, 0xa7, 0x5c, 0xed, 0x64, 0x07, 0xbd, 0xb2, 0xd0, 0x7f, 0xbd, 0xe3, 0x2c, + 0x98, 0xbd, 0xbc, 0xda, 0xec, 0xc6, 0x3f, 0x1a, 0xda, 0x59, 0x1a, 0x09, 0x7f, 0x88, 0x36, 0xe7, + 0x26, 0x87, 0xec, 0x86, 0xad, 0xdb, 0x77, 0x54, 0xbf, 0xcd, 0x83, 0xf9, 0x24, 0x29, 0x62, 0xf1, + 0xa7, 0xa8, 0x9a, 0xc4, 0x10, 0x29, 0x85, 0xdf, 0x5a, 0x2d, 0xc7, 0x51, 0x0c, 0x51, 0x9f, 0x9d, + 0x04, 0xb9, 0xb4, 0x22, 0x42, 0x24, 0x43, 0x71, 0x93, 0xea, 0x0d, 0x7f, 0xdb, 0xbb, 0xa8, 0x06, + 0x51, 0x14, 0x44, 0xea, 0xde, 0x9e, 0x69, 0xd3, 0x13, 0x41, 0x92, 0xe5, 0x8c, 0xdf, 0xcb, 0xa8, + 0x7e, 0xd9, 0x12, 0xbf, 0x8b, 0xea, 0xa2, 0x8d, 0xbc, 0xec, 0x33, 0x41, 0xb7, 0x55, 0x91, 0xc4, + 0x88, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0xa8, 0x92, 0x78, 0xae, 0x7a, 0x43, 0x9a, 0x73, 0x97, 0x3e, + 0x11, 0x71, 0x6c, 0xa0, 0x8d, 0x61, 0x14, 0x24, 0xa1, 0xb0, 0x81, 0x18, 0x14, 0x89, 0x13, 0xfd, + 0x44, 0x46, 0x88, 0xca, 0xe0, 0x63, 0x54, 0x03, 0x71, 0xe7, 0xcb, 0x5d, 0x9a, 0xdd, 0xce, 0x7a, + 0xd2, 0x98, 0xf2, 0x9d, 0xe8, 0x31, 0x1e, 0x4d, 0xe6, 0xb6, 0x12, 0x31, 0x92, 0xd1, 0xed, 0x0e, + 0xd4, 0x5b, 0x22, 0x31, 0x78, 0x1b, 0x55, 0x46, 0x30, 0xc9, 0x36, 0x22, 0xe2, 0x27, 0xfe, 0x08, + 0xd5, 0xc6, 0xe2, 0x99, 0x51, 0x47, 0xb2, 0xbf, 0xba, 0x6f, 0xfe, 0x2c, 0x91, 0xac, 0xec, 0x61, + 0xf9, 0x81, 0x66, 0xef, 0x9f, 0x5d, 0xb4, 0x4b, 0x2f, 0x2e, 0xda, 0xa5, 0xf3, 0x8b, 0x76, 0xe9, + 0x79, 0xda, 0xd6, 0xce, 0xd2, 0xb6, 0xf6, 0x22, 0x6d, 0x6b, 0xe7, 0x69, 0x5b, 0xfb, 0x2b, 0x6d, + 0x6b, 0x3f, 0xfe, 0xdd, 0x2e, 0x7d, 0x5d, 0x1e, 0x77, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x8c, + 0x44, 0x87, 0xd0, 0xe2, 0x08, 0x00, 0x00, +} + func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -118,33 +387,42 @@ func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { } func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.UID) + copy(dAtA[i:], m.UID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,32 +430,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,41 +462,52 @@ func (m *TokenRequest) Marshal() (dAtA []byte, err error) { } func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -227,47 +515,48 @@ func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x20 } if m.BoundObjectRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) - n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } 
- i += n4 + i-- + dAtA[i] = 0x1a } - if m.ExpirationSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -275,29 +564,37 @@ func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) - n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -305,41 +602,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n7, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n8, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
if err != nil { return nil, err } @@ -347,36 +655,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) if len(m.Audiences) > 0 { - for _, s := range m.Audiences { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -384,52 +692,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n9, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -437,77 +747,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += 
copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *BoundObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -522,6 +836,9 @@ func (m *BoundObjectReference) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -534,6 +851,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -546,6 +866,9 @@ func (m *TokenRequest) Size() (n int) { } func (m *TokenRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Audiences) > 0 { @@ -565,6 +888,9 @@ func (m *TokenRequestSpec) Size() (n int) { } func (m *TokenRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -575,6 +901,9 @@ func (m *TokenRequestStatus) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -587,6 +916,9 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() 
(n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -601,6 +933,9 @@ func (m *TokenReviewSpec) Size() (n int) { } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -618,6 +953,9 @@ func (m *TokenReviewStatus) Size() (n int) { } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -643,14 +981,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -673,7 +1004,7 @@ func (this *TokenRequest) String() string { return "nil" } s := strings.Join([]string{`&TokenRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -686,7 +1017,7 @@ func (this *TokenRequestSpec) String() string { } s := strings.Join([]string{`&TokenRequestSpec{`, `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, - `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`, `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") @@ -698,7 +1029,7 @@ func (this *TokenRequestStatus) String() string { } s := strings.Join([]string{`&TokenRequestStatus{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -708,7 +1039,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -785,7 +1116,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -813,7 +1144,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-823,6 +1154,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -842,7 +1176,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -852,6 +1186,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -871,7 +1208,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -881,6 +1218,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -900,7 +1240,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -910,6 +1250,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -924,6 +1267,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -951,7 +1297,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -979,7 +1325,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -989,6 +1335,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1003,6 +1352,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1030,7 +1382,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1058,7 +1410,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1067,6 +1419,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1088,7 +1443,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1097,6 +1452,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1118,7 +1476,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1127,6 +1485,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1143,6 +1504,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1170,7 +1534,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1198,7 +1562,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1208,6 +1572,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1227,7 +1594,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1236,6 +1603,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1260,7 +1630,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1275,6 +1645,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1675,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1330,7 +1703,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1340,6 +1713,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1359,7 +1735,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1368,6 +1744,9 @@ func (m *TokenRequestStatus) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1384,6 +1763,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1411,7 +1793,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1439,7 +1821,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1448,6 +1830,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,7 +1854,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1478,6 +1863,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1499,7 +1887,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1508,6 +1896,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1524,6 +1915,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1551,7 +1945,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1579,7 +1973,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1589,6 +1983,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1608,7 +2005,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1618,6 +2015,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1632,6 +2032,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1659,7 +2062,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1687,7 +2090,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1707,7 +2110,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1716,6 +2119,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1737,7 +2143,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1747,6 +2153,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1766,7 +2175,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1776,6 +2185,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1790,6 +2202,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1817,7 +2232,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1845,7 +2260,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1855,6 +2270,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1874,7 +2292,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1884,6 +2302,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1903,7 +2324,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1913,6 +2334,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1932,7 +2356,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1941,6 +2365,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1961,7 +2388,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1978,7 +2405,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1988,6 +2415,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2004,7 +2434,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2013,7 +2443,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2050,6 +2480,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2116,10 +2549,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2148,6 +2584,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2166,68 +2605,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42, - 0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec, - 0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09, - 0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd, - 0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe, - 0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c, - 0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 
0x9e, 0x45, 0x13, - 0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39, - 0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93, - 0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92, - 0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea, - 0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84, - 0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67, - 0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37, - 0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b, - 0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44, - 0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac, - 0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73, - 0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54, - 0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd, - 0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b, - 0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa, - 0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f, - 0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5, - 0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7, - 0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24, - 0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75, - 0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1, - 0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58, - 0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68, - 0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e, - 0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a, - 0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4, - 0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92, - 0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 0x17, - 0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10, - 0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee, - 0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff, - 0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b, - 0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98, - 0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c, - 0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f, - 0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b, - 0x72, 
0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb, - 0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d, - 0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27, - 0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e, - 0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11, - 0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6, - 0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96, - 0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89, - 0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf, - 0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba, - 0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc, - 0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f, - 0xe2, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 5f34e76a..0721bda8 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -17,31 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -54,25 +44,145 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_TokenReview proto.InternalMessageInfo -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func 
(*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{3} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{4} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") @@ -80,11 +190,63 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.UserInfo.ExtraEntry") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) +} + +var fileDescriptor_77c9b20d3ad27844 = []byte{ + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xb6, 0xf3, 0x53, 0x92, 0x0d, 0x81, 0xb2, 0x12, 0x52, 0x14, 0x09, 0xa7, 0x84, 0x4b, 0xa5, + 0xd2, 0x35, 0xad, 0xaa, 0x52, 0x95, 0x53, 0x0d, 0x15, 0x2a, 0x52, 0x85, 0xb4, 0xb4, 0x1c, 0x80, + 0x03, 0x1b, 0x67, 0xea, 0x98, 0xe0, 0x1f, 0xad, 0xd7, 0x81, 0xde, 0xfa, 0x08, 0x1c, 0x39, 0x22, + 0xf1, 0x24, 0xdc, 0x7a, 0xec, 0xb1, 0x07, 0x14, 0x51, 0xf3, 0x04, 0xbc, 0x01, 0xda, 0xf5, 0xb6, + 0x4e, 0x1b, 0x41, 0xdb, 0x9b, 0xf7, 0x9b, 0xf9, 0xbe, 0x99, 0xf9, 0xc6, 0x83, 0x5e, 0x0c, 0xd7, + 0x12, 0xe2, 0x47, 0xf6, 0x30, 0xed, 0x01, 0x0f, 0x41, 0x40, 0x62, 0x8f, 0x20, 0xec, 0x47, 0xdc, + 0xd6, 0x01, 0x16, 0xfb, 0x36, 0x4b, 0xc5, 0x00, 0x42, 0xe1, 0xbb, 0x4c, 0xf8, 0x51, 0x68, 0x8f, + 0x96, 0x7a, 0x20, 0xd8, 0x92, 0xed, 0x41, 0x08, 0x9c, 0x09, 0xe8, 0x93, 0x98, 0x47, 0x22, 0xc2, + 0xf7, 0x73, 0x0a, 0x61, 0xb1, 0x4f, 0xce, 0x53, 0x88, 0xa6, 0xb4, 0x17, 0x3d, 0x5f, 0x0c, 0xd2, + 0x1e, 0x71, 0xa3, 0xc0, 0xf6, 0x22, 0x2f, 0xb2, 0x15, 0xb3, 0x97, 0xee, 0xa9, 0x97, 0x7a, 0xa8, + 0xaf, 0x5c, 0xb1, 0xbd, 0x52, 0x34, 0x11, 0x30, 0x77, 0xe0, 0x87, 0xc0, 0xf7, 0xed, 0x78, 0xe8, + 0x49, 0x20, 0xb1, 0x03, 
0x10, 0xcc, 0x1e, 0x4d, 0xf5, 0xd1, 0xb6, 0xff, 0xc5, 0xe2, 0x69, 0x28, + 0xfc, 0x00, 0xa6, 0x08, 0xab, 0x97, 0x11, 0x12, 0x77, 0x00, 0x01, 0xbb, 0xc8, 0xeb, 0x3e, 0x46, + 0x68, 0xf3, 0xb3, 0xe0, 0xec, 0x35, 0xfb, 0x98, 0x02, 0xee, 0xa0, 0xaa, 0x2f, 0x20, 0x48, 0x5a, + 0xe6, 0x5c, 0x79, 0xbe, 0xee, 0xd4, 0xb3, 0x71, 0xa7, 0xba, 0x25, 0x01, 0x9a, 0xe3, 0xeb, 0xb5, + 0xaf, 0xdf, 0x3a, 0xc6, 0xc1, 0xcf, 0x39, 0xa3, 0xfb, 0xbd, 0x84, 0x1a, 0x3b, 0xd1, 0x10, 0x42, + 0x0a, 0x23, 0x1f, 0x3e, 0xe1, 0xf7, 0xa8, 0x26, 0x87, 0xe9, 0x33, 0xc1, 0x5a, 0xe6, 0x9c, 0x39, + 0xdf, 0x58, 0x7e, 0x44, 0x0a, 0x33, 0xcf, 0x7a, 0x22, 0xf1, 0xd0, 0x93, 0x40, 0x42, 0x64, 0x36, + 0x19, 0x2d, 0x91, 0x97, 0xbd, 0x0f, 0xe0, 0x8a, 0x6d, 0x10, 0xcc, 0xc1, 0x87, 0xe3, 0x8e, 0x91, + 0x8d, 0x3b, 0xa8, 0xc0, 0xe8, 0x99, 0x2a, 0xde, 0x41, 0x95, 0x24, 0x06, 0xb7, 0x55, 0x52, 0xea, + 0xcb, 0xe4, 0xd2, 0x55, 0x91, 0x89, 0xfe, 0x5e, 0xc5, 0xe0, 0x3a, 0x37, 0xb5, 0x7e, 0x45, 0xbe, + 0xa8, 0x52, 0xc3, 0xef, 0xd0, 0x4c, 0x22, 0x98, 0x48, 0x93, 0x56, 0x59, 0xe9, 0xae, 0x5c, 0x53, + 0x57, 0x71, 0x9d, 0x5b, 0x5a, 0x79, 0x26, 0x7f, 0x53, 0xad, 0xd9, 0x75, 0xd1, 0xed, 0x0b, 0x4d, + 0xe0, 0x07, 0xa8, 0x2a, 0x24, 0xa4, 0x5c, 0xaa, 0x3b, 0x4d, 0xcd, 0xac, 0xe6, 0x79, 0x79, 0x0c, + 0x2f, 0xa0, 0x3a, 0x4b, 0xfb, 0x3e, 0x84, 0x2e, 0x24, 0xad, 0x92, 0x5a, 0x46, 0x33, 0x1b, 0x77, + 0xea, 0x1b, 0xa7, 0x20, 0x2d, 0xe2, 0xdd, 0x3f, 0x26, 0xba, 0x33, 0xd5, 0x12, 0x7e, 0x82, 0x9a, + 0x13, 0xed, 0x43, 0x5f, 0xd5, 0xab, 0x39, 0x77, 0x75, 0xbd, 0xe6, 0xc6, 0x64, 0x90, 0x9e, 0xcf, + 0xc5, 0xdb, 0xa8, 0x92, 0x26, 0xc0, 0xb5, 0xd7, 0x0b, 0x57, 0xf0, 0x64, 0x37, 0x01, 0xbe, 0x15, + 0xee, 0x45, 0x85, 0xc9, 0x12, 0xa1, 0x4a, 0xe6, 0xfc, 0x38, 0x95, 0xff, 0x8f, 0x23, 0x0d, 0x02, + 0xce, 0x23, 0xae, 0x16, 0x32, 0x61, 0xd0, 0xa6, 0x04, 0x69, 0x1e, 0xeb, 0xfe, 0x28, 0xa1, 0xda, + 0x69, 0x49, 0xfc, 0x10, 0xd5, 0x64, 0x99, 0x90, 0x05, 0xa0, 0x5d, 0x9d, 0xd5, 0x24, 0x95, 0x23, + 0x71, 0x7a, 0x96, 0x81, 0xef, 0xa1, 0x72, 0xea, 0xf7, 0xd5, 0x68, 0x75, 0xa7, 0xa1, 0x13, 0xcb, + 0xbb, 0x5b, 0xcf, 0xa8, 0xc4, 0x71, 0x17, 0xcd, 0x78, 0x3c, 0x4a, 0x63, 0xf9, 0x43, 0xc8, 0x46, + 0x91, 0x5c, 0xeb, 0x73, 0x85, 0x50, 0x1d, 0xc1, 0x6f, 0x51, 0x15, 0xe4, 0xd5, 0xa8, 0x59, 0x1a, + 0xcb, 0xab, 0xd7, 0xf0, 0x87, 0xa8, 0x73, 0xdb, 0x0c, 0x05, 0xdf, 0x9f, 0x18, 0x4d, 0x62, 0x34, + 0xd7, 0x6c, 0x7b, 0xfa, 0x24, 0x55, 0x0e, 0x9e, 0x45, 0xe5, 0x21, 0xec, 0xe7, 0x63, 0x51, 0xf9, + 0x89, 0x9f, 0xa2, 0xea, 0x48, 0x5e, 0xab, 0x5e, 0xce, 0xe2, 0x15, 0x8a, 0x17, 0x27, 0x4e, 0x73, + 0xee, 0x7a, 0x69, 0xcd, 0x74, 0x16, 0x0f, 0x4f, 0x2c, 0xe3, 0xe8, 0xc4, 0x32, 0x8e, 0x4f, 0x2c, + 0xe3, 0x20, 0xb3, 0xcc, 0xc3, 0xcc, 0x32, 0x8f, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x57, 0x66, + 0x99, 0x5f, 0x7e, 0x5b, 0xc6, 0x9b, 0x1b, 0x5a, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, + 0xbb, 0x89, 0x53, 0x68, 0x05, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -92,32 +254,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -125,41 +286,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -167,36 +339,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) if len(m.Audiences) > 0 { - for _, s := range m.Audiences { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -204,52 +376,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -257,77 +431,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 + 
i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -340,6 +518,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -352,6 +533,9 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -366,6 +550,9 @@ func (m *TokenReviewSpec) Size() (n int) { } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -383,6 +570,9 @@ func (m *TokenReviewStatus) Size() (n int) { } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -408,14 +598,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -425,7 +608,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -502,7 +685,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -530,7 +713,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -540,6 +723,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -554,6 +740,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -581,7 +770,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -609,7 +798,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -618,6 +807,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -639,7 +831,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -648,6 +840,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -669,7 +864,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -678,6 +873,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -694,6 +892,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -721,7 +922,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -749,7 +950,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -759,6 +960,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -778,7 +982,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -788,6 +992,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -802,6 +1009,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -829,7 +1039,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -857,7 +1067,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -877,7 +1087,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -886,6 +1096,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -907,7 +1120,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -917,6 +1130,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -936,7 +1152,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -946,6 +1162,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -960,6 +1179,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -987,7 +1209,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1015,7 +1237,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1025,6 +1247,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1044,7 +1269,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1054,6 +1279,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1073,7 +1301,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1083,6 +1311,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1102,7 +1333,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1111,6 +1342,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error 
{ return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1131,7 +1365,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1148,7 +1382,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1158,6 +1392,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1174,7 +1411,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1183,7 +1420,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -1220,6 +1457,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1286,10 +1526,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1318,6 +1561,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1336,53 +1582,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 663 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x4e, 0x14, 0x4d, - 0x14, 0xed, 0x9e, 0x1f, 0xbe, 0x99, 0x9a, 0x6f, 0x14, 0x2b, 0x31, 0x99, 0x4c, 0x62, 0x0f, 0x8e, - 0x1b, 0x12, 0xa4, 0x5a, 0x08, 0x41, 0x82, 0x2b, 0x5a, 0x89, 0xc1, 0x84, 0x98, 0x94, 0xe0, 0x42, - 0x5d, 0x58, 0xd3, 0x73, 0xe9, 0x69, 0xc7, 0xfe, 0x49, 0x55, 0xf5, 0x28, 0x3b, 0x1e, 0xc1, 0xa5, - 0x4b, 0x13, 0x9f, 0xc4, 0x1d, 0x4b, 0x96, 0x2c, 0xcc, 0x44, 0xda, 0x27, 0xf0, 0x0d, 0x4c, 0x55, - 0x17, 0xcc, 0x00, 0x31, 0xc0, 0xae, 0xeb, 0xdc, 0x7b, 0xce, 0x3d, 0xf7, 0x54, 0x17, 0x7a, 0x31, - 0x5c, 0x13, 0x24, 0x4c, 0xdc, 0x61, 0xd6, 0x03, 0x1e, 0x83, 0x04, 0xe1, 0x8e, 0x20, 0xee, 0x27, - 0xdc, 0x35, 0x05, 0x96, 0x86, 0x2e, 0xcb, 0xe4, 0x00, 0x62, 0x19, 0xfa, 0x4c, 0x86, 0x49, 0xec, - 0x8e, 0x96, 0x7a, 0x20, 0xd9, 0x92, 0x1b, 0x40, 0x0c, 0x9c, 0x49, 0xe8, 0x93, 0x94, 0x27, 0x32, - 0xc1, 0xf7, 0x0b, 0x0a, 0x61, 0x69, 0x48, 0xce, 0x53, 0x88, 0xa1, 0xb4, 0x17, 0x83, 0x50, 0x0e, - 0xb2, 0x1e, 0xf1, 0x93, 0xc8, 0x0d, 0x92, 0x20, 0x71, 0x35, 0xb3, 0x97, 0xed, 0xe9, 0x93, 0x3e, - 0xe8, 0xaf, 0x42, 0xb1, 0xbd, 
0x32, 0x31, 0x11, 0x31, 0x7f, 0x10, 0xc6, 0xc0, 0xf7, 0xdd, 0x74, - 0x18, 0x28, 0x40, 0xb8, 0x11, 0x48, 0xe6, 0x8e, 0x2e, 0xf9, 0x68, 0xbb, 0xff, 0x62, 0xf1, 0x2c, - 0x96, 0x61, 0x04, 0x97, 0x08, 0xab, 0x57, 0x11, 0x84, 0x3f, 0x80, 0x88, 0x5d, 0xe4, 0x75, 0x1f, - 0x23, 0xb4, 0xf9, 0x59, 0x72, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x50, 0x35, 0x94, 0x10, 0x89, - 0x96, 0x3d, 0x57, 0x9e, 0xaf, 0x7b, 0xf5, 0x7c, 0xdc, 0xa9, 0x6e, 0x29, 0x80, 0x16, 0xf8, 0x7a, - 0xed, 0xeb, 0xb7, 0x8e, 0x75, 0xf0, 0x73, 0xce, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x4e, 0x32, 0x84, - 0x98, 0xc2, 0x28, 0x84, 0x4f, 0xf8, 0x3d, 0xaa, 0xa9, 0x65, 0xfa, 0x4c, 0xb2, 0x96, 0x3d, 0x67, - 0xcf, 0x37, 0x96, 0x1f, 0x91, 0x49, 0x98, 0x67, 0x9e, 0x48, 0x3a, 0x0c, 0x14, 0x20, 0x88, 0xea, - 0x26, 0xa3, 0x25, 0xf2, 0xb2, 0xf7, 0x01, 0x7c, 0xb9, 0x0d, 0x92, 0x79, 0xf8, 0x70, 0xdc, 0xb1, - 0xf2, 0x71, 0x07, 0x4d, 0x30, 0x7a, 0xa6, 0x8a, 0x77, 0x50, 0x45, 0xa4, 0xe0, 0xb7, 0x4a, 0x5a, - 0x7d, 0x99, 0x5c, 0x79, 0x55, 0x64, 0xca, 0xdf, 0xab, 0x14, 0x7c, 0xef, 0x7f, 0xa3, 0x5f, 0x51, - 0x27, 0xaa, 0xd5, 0xf0, 0x3b, 0x34, 0x23, 0x24, 0x93, 0x99, 0x68, 0x95, 0xb5, 0xee, 0xca, 0x0d, - 0x75, 0x35, 0xd7, 0xbb, 0x65, 0x94, 0x67, 0x8a, 0x33, 0x35, 0x9a, 0x5d, 0x1f, 0xdd, 0xbe, 0x60, - 0x02, 0x3f, 0x40, 0x55, 0xa9, 0x20, 0x9d, 0x52, 0xdd, 0x6b, 0x1a, 0x66, 0xb5, 0xe8, 0x2b, 0x6a, - 0x78, 0x01, 0xd5, 0x59, 0xd6, 0x0f, 0x21, 0xf6, 0x41, 0xb4, 0x4a, 0xfa, 0x32, 0x9a, 0xf9, 0xb8, - 0x53, 0xdf, 0x38, 0x05, 0xe9, 0xa4, 0xde, 0xfd, 0x63, 0xa3, 0x3b, 0x97, 0x2c, 0xe1, 0x27, 0xa8, - 0x39, 0x65, 0x1f, 0xfa, 0x7a, 0x5e, 0xcd, 0xbb, 0x6b, 0xe6, 0x35, 0x37, 0xa6, 0x8b, 0xf4, 0x7c, - 0x2f, 0xde, 0x46, 0x95, 0x4c, 0x00, 0x37, 0x59, 0x2f, 0x5c, 0x23, 0x93, 0x5d, 0x01, 0x7c, 0x2b, - 0xde, 0x4b, 0x26, 0x21, 0x2b, 0x84, 0x6a, 0x19, 0xb5, 0x33, 0x70, 0x9e, 0x70, 0x9d, 0xf1, 0xd4, - 0xce, 0x9b, 0x0a, 0xa4, 0x45, 0xed, 0xfc, 0xce, 0x95, 0x2b, 0x76, 0xfe, 0x51, 0x42, 0xb5, 0xd3, - 0x91, 0xf8, 0x21, 0xaa, 0xa9, 0x31, 0x31, 0x8b, 0xc0, 0xa4, 0x3a, 0x6b, 0x26, 0xe8, 0x1e, 0x85, - 0xd3, 0xb3, 0x0e, 0x7c, 0x0f, 0x95, 0xb3, 0xb0, 0xaf, 0x57, 0xab, 0x7b, 0x0d, 0xd3, 0x58, 0xde, - 0xdd, 0x7a, 0x46, 0x15, 0x8e, 0xbb, 0x68, 0x26, 0xe0, 0x49, 0x96, 0xaa, 0x1f, 0x42, 0x79, 0x40, - 0xea, 0x5a, 0x9f, 0x6b, 0x84, 0x9a, 0x0a, 0x7e, 0x8b, 0xaa, 0xa0, 0x5e, 0x8d, 0xb6, 0xd9, 0x58, - 0x5e, 0xbd, 0x41, 0x3e, 0x44, 0x3f, 0xb7, 0xcd, 0x58, 0xf2, 0xfd, 0xa9, 0x1c, 0x14, 0x46, 0x0b, - 0xcd, 0x76, 0x60, 0x9e, 0xa4, 0xee, 0xc1, 0xb3, 0xa8, 0x3c, 0x84, 0xfd, 0x62, 0x2d, 0xaa, 0x3e, - 0xf1, 0x53, 0x54, 0x1d, 0xa9, 0xd7, 0x6a, 0x2e, 0x67, 0xf1, 0x1a, 0xc3, 0x27, 0x4f, 0x9c, 0x16, - 0xdc, 0xf5, 0xd2, 0x9a, 0xed, 0x2d, 0x1e, 0x9e, 0x38, 0xd6, 0xd1, 0x89, 0x63, 0x1d, 0x9f, 0x38, - 0xd6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, 0x8e, 0x7d, 0x9c, 0x3b, 0xf6, 0xaf, 0xdc, - 0xb1, 0xbf, 0xfc, 0x76, 0xac, 0x37, 0xff, 0x19, 0x91, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, - 0xd6, 0x32, 0x28, 0x68, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index fc6a25f6..0dc01bc9 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -17,40 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -63,73 +44,397 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_e50da13573e369bd, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{3} } +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{5} +} 
+func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_e50da13573e369bd, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func 
(*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_e50da13573e369bd, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_e50da13573e369bd, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() } +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptorGenerated, []int{12} + return fileDescriptor_e50da13573e369bd, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() } +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_e50da13573e369bd, []int{13} +} +func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") @@ -144,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) +} + +var fileDescriptor_e50da13573e369bd = []byte{ + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xda, 0xd1, + 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, + 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, + 0xc2, 0xa9, 0x12, 0xff, 0x00, 0x47, 0x0e, 0x1c, 0xf8, 0x0f, 0xb8, 0x20, 0x71, 0xe3, 0xc0, 0x01, + 0xe5, 0xd8, 0x63, 0x91, 0x90, 0x45, 0x96, 0x33, 0xff, 0x03, 0x9a, 0xd9, 0xb1, 0x77, 0x9d, 0xac, + 0xdd, 0x84, 0x03, 0xbd, 0xf4, 0xb6, 0xfb, 0x3e, 0x9f, 0xf7, 0xe6, 0xcd, 0xfb, 0x35, 0x0f, 0x6c, + 0x1f, 0x6d, 0x32, 0xcb, 
0xf5, 0xed, 0xa3, 0xb0, 0x49, 0xa8, 0x47, 0x38, 0x61, 0x76, 0x9f, 0x78, + 0x2d, 0x9f, 0xda, 0x0a, 0xc0, 0x81, 0x6b, 0xe3, 0x90, 0x77, 0x7c, 0xea, 0x7e, 0x8d, 0xb9, 0xeb, + 0x7b, 0x76, 0x7f, 0xdd, 0x6e, 0x13, 0x8f, 0x50, 0xcc, 0x49, 0xcb, 0x0a, 0xa8, 0xcf, 0x7d, 0x78, + 0x2b, 0x26, 0x5b, 0x38, 0x70, 0xad, 0x31, 0xb2, 0xd5, 0x5f, 0x5f, 0x79, 0xaf, 0xed, 0xf2, 0x4e, + 0xd8, 0xb4, 0x1c, 0xbf, 0x67, 0xb7, 0xfd, 0xb6, 0x6f, 0x4b, 0x9d, 0x66, 0x78, 0x28, 0xff, 0xe4, + 0x8f, 0xfc, 0x8a, 0x6d, 0xad, 0xdc, 0x4d, 0x0e, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x9e, 0xd8, + 0xc1, 0x51, 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x78, 0xb0, 0x62, 0x4f, 0xd2, 0xa2, 0xa1, + 0xc7, 0xdd, 0x1e, 0xb9, 0xa0, 0x70, 0xff, 0x55, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xaf, 0x67, + 0x6e, 0x00, 0xb0, 0xf3, 0x15, 0xa7, 0xf8, 0x00, 0x77, 0x43, 0x02, 0x6b, 0xa0, 0xe8, 0x72, 0xd2, + 0x63, 0x86, 0xb6, 0x9a, 0x5f, 0x2b, 0x37, 0xca, 0xd1, 0xa0, 0x56, 0xdc, 0x15, 0x02, 0x14, 0xcb, + 0x1f, 0x94, 0xbe, 0xfb, 0xa1, 0x96, 0x7b, 0xfe, 0xc7, 0x6a, 0xce, 0xfc, 0x49, 0x07, 0xc6, 0x23, + 0xdf, 0xc1, 0xdd, 0xbd, 0xb0, 0xf9, 0x05, 0x71, 0xf8, 0x96, 0xe3, 0x10, 0xc6, 0x10, 0xe9, 0xbb, + 0xe4, 0x18, 0x7e, 0x0e, 0x4a, 0xe2, 0x66, 0x2d, 0xcc, 0xb1, 0xa1, 0xad, 0x6a, 0x6b, 0x95, 0xfa, + 0xfb, 0x56, 0x12, 0xd3, 0x91, 0x83, 0x56, 0x70, 0xd4, 0x16, 0x02, 0x66, 0x09, 0xb6, 0xd5, 0x5f, + 0xb7, 0x3e, 0x92, 0xb6, 0x1e, 0x13, 0x8e, 0x1b, 0xf0, 0x74, 0x50, 0xcb, 0x45, 0x83, 0x1a, 0x48, + 0x64, 0x68, 0x64, 0x15, 0x1e, 0x80, 0x02, 0x0b, 0x88, 0x63, 0xe8, 0xd2, 0xfa, 0x5d, 0x6b, 0x4a, + 0xc6, 0xac, 0x0c, 0x0f, 0xf7, 0x02, 0xe2, 0x34, 0xae, 0xa9, 0x13, 0x0a, 0xe2, 0x0f, 0x49, 0x7b, + 0xf0, 0x33, 0x30, 0xc3, 0x38, 0xe6, 0x21, 0x33, 0xf2, 0xd2, 0xf2, 0xfd, 0x2b, 0x5b, 0x96, 0xda, + 0x8d, 0xff, 0x29, 0xdb, 0x33, 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc0, 0xd2, 0x13, 0xdf, 0x43, + 0x84, 0xf9, 0x21, 0x75, 0xc8, 0x16, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x0a, 0x0a, 0x01, + 0xe6, 0x1d, 0x19, 0xae, 0x72, 0xe2, 0xda, 0x53, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, + 0x29, 0xaf, 0x9c, 0x62, 0x1c, 0x10, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x04, 0xf3, 0x29, 0xe3, 0x28, + 0xec, 0xca, 0x8c, 0x0a, 0x68, 0x2c, 0xa3, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x82, 0x79, 0x2f, + 0xd1, 0xd9, 0x47, 0x8f, 0x98, 0xa1, 0x4b, 0xea, 0x62, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, + 0xe7, 0x9a, 0xbf, 0xe8, 0x00, 0x66, 0xdc, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, + 0xd4, 0x95, 0xae, 0x2b, 0x87, 0xcb, 0x4f, 0x86, 0x00, 0x4a, 0x38, 0xaf, 0xbe, 0x1c, 0x7c, 0x0b, + 0x14, 0xdb, 0xd4, 0x0f, 0x03, 0x99, 0x98, 0x72, 0x63, 0x4e, 0x51, 0x8a, 0x1f, 0x0a, 0x21, 0x8a, + 0x31, 0x78, 0x1b, 0xcc, 0xf6, 0x09, 0x65, 0xae, 0xef, 0x19, 0x05, 0x49, 0x9b, 0x57, 0xb4, 0xd9, + 0x83, 0x58, 0x8c, 0x86, 0x38, 0xbc, 0x03, 0x4a, 0x54, 0x39, 0x6e, 0x14, 0x25, 0x77, 0x41, 0x71, + 0x4b, 0xa3, 0x08, 0x8e, 0x18, 0xf0, 0x1e, 0xa8, 0xb0, 0xb0, 0x39, 0x52, 0x98, 0x91, 0x0a, 0x8b, + 0x4a, 0xa1, 0xb2, 0x97, 0x40, 0x28, 0xcd, 0x13, 0xd7, 0x12, 0x77, 0x34, 0x66, 0xc7, 0xaf, 0x25, + 0x42, 0x80, 0x24, 0x62, 0xfe, 0xaa, 0x81, 0x6b, 0x57, 0xcb, 0xd8, 0xbb, 0xa0, 0x8c, 0x03, 0x57, + 0x5e, 0x7b, 0x98, 0xab, 0x39, 0x11, 0xd7, 0xad, 0xa7, 0xbb, 0xb1, 0x10, 0x25, 0xb8, 0x20, 0x0f, + 0x9d, 0x11, 0x25, 0x3d, 0x22, 0x0f, 0x8f, 0x64, 0x28, 0xc1, 0xe1, 0x06, 0x98, 0x1b, 0xfe, 0xc8, + 0x24, 0x19, 0x05, 0xa9, 0x70, 0x3d, 0x1a, 0xd4, 0xe6, 0x50, 0x1a, 0x40, 0xe3, 0x3c, 0xf3, 0x67, + 0x1d, 0x2c, 0xef, 0x91, 0xee, 0xe1, 0xeb, 0x99, 0x05, 0xcf, 0xc6, 0x66, 0xc1, 0xe6, 0xf4, 0x8e, + 0xcd, 0xf6, 0xf2, 0xb5, 0xcd, 0x83, 0xef, 0x75, 
0x70, 0x6b, 0x8a, 0x4f, 0xf0, 0x18, 0x40, 0x7a, + 0xa1, 0xbd, 0x54, 0x1c, 0xed, 0xa9, 0xbe, 0x5c, 0xec, 0xca, 0xc6, 0x8d, 0x68, 0x50, 0xcb, 0xe8, + 0x56, 0x94, 0x71, 0x04, 0xfc, 0x46, 0x03, 0x4b, 0x5e, 0xd6, 0xa4, 0x52, 0x61, 0xae, 0x4f, 0x3d, + 0x3c, 0x73, 0xc6, 0x35, 0x6e, 0x46, 0x83, 0x5a, 0xf6, 0xf8, 0x43, 0xd9, 0x67, 0x89, 0x57, 0xe6, + 0x46, 0x2a, 0x3c, 0xa2, 0x41, 0xfe, 0xbb, 0xba, 0xfa, 0x78, 0xac, 0xae, 0x36, 0x2e, 0x5b, 0x57, + 0x29, 0x27, 0x27, 0x96, 0xd5, 0xa7, 0xe7, 0xca, 0xea, 0xde, 0x65, 0xca, 0x2a, 0x6d, 0x78, 0x7a, + 0x55, 0x3d, 0x06, 0x2b, 0x93, 0x1d, 0xba, 0xf2, 0x70, 0x36, 0x7f, 0xd4, 0xc1, 0xe2, 0x9b, 0x67, + 0xfe, 0x2a, 0x6d, 0xfd, 0x5b, 0x01, 0x2c, 0xbf, 0x69, 0xe9, 0x49, 0x8b, 0x4e, 0xc8, 0x08, 0x55, + 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x50, 0x24, 0x11, 0x68, 0x82, 0x99, 0x76, 0xfc, 0xba, 0xc5, + 0xef, 0x0f, 0x10, 0x01, 0x56, 0x4f, 0x9b, 0x42, 0x60, 0x0b, 0x14, 0x89, 0xd8, 0x5b, 0x8d, 0xe2, + 0x6a, 0x7e, 0xad, 0x52, 0xff, 0xe0, 0xdf, 0x54, 0x86, 0x25, 0x37, 0xdf, 0x1d, 0x8f, 0xd3, 0x93, + 0x64, 0x9d, 0x90, 0x32, 0x14, 0x1b, 0x87, 0xff, 0x07, 0xf9, 0xd0, 0x6d, 0xa9, 0xd7, 0xbe, 0xa2, + 0x28, 0xf9, 0xfd, 0xdd, 0x6d, 0x24, 0xe4, 0x2b, 0x58, 0x2d, 0xcf, 0xd2, 0x04, 0x5c, 0x00, 0xf9, + 0x23, 0x72, 0x12, 0x37, 0x14, 0x12, 0x9f, 0xf0, 0x21, 0x28, 0xf6, 0xc5, 0x5e, 0xad, 0xe2, 0xfb, + 0xce, 0x54, 0x27, 0x93, 0x35, 0x1c, 0xc5, 0x5a, 0x0f, 0xf4, 0x4d, 0xcd, 0xfc, 0x5d, 0x03, 0x37, + 0x27, 0x96, 0x9f, 0x58, 0x77, 0x70, 0xb7, 0xeb, 0x1f, 0x93, 0x96, 0x3c, 0xb6, 0x94, 0xac, 0x3b, + 0x5b, 0xb1, 0x18, 0x0d, 0x71, 0xf8, 0x36, 0x98, 0x69, 0x11, 0xcf, 0x25, 0x2d, 0xb9, 0x18, 0x95, + 0x92, 0xca, 0xdd, 0x96, 0x52, 0xa4, 0x50, 0xc1, 0xa3, 0x04, 0x33, 0xdf, 0x53, 0xab, 0xd8, 0x88, + 0x87, 0xa4, 0x14, 0x29, 0x14, 0x6e, 0x81, 0x79, 0x22, 0xdc, 0x94, 0xfe, 0xef, 0x50, 0xea, 0x0f, + 0x33, 0xba, 0xac, 0x14, 0xe6, 0x77, 0xc6, 0x61, 0x74, 0x9e, 0x6f, 0xfe, 0xad, 0x03, 0x63, 0xd2, + 0x68, 0x83, 0x87, 0xc9, 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2f, 0xd5, 0x20, 0x42, + 0xa3, 0xb1, 0xa4, 0x1c, 0x99, 0x4b, 0x4b, 0x53, 0xab, 0x8b, 0xfc, 0x85, 0x14, 0x2c, 0x78, 0xe3, + 0x3b, 0x73, 0xbc, 0x54, 0x55, 0xea, 0x77, 0x2e, 0xdb, 0x0e, 0xf2, 0x34, 0x43, 0x9d, 0xb6, 0x70, + 0x0e, 0x60, 0xe8, 0x82, 0x7d, 0x58, 0x07, 0xc0, 0xf5, 0x1c, 0xbf, 0x17, 0x74, 0x09, 0x27, 0x32, + 0x6c, 0xa5, 0x64, 0x0e, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xc5, 0xbb, 0x70, 0xb5, 0x78, 0x37, + 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, + 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, + 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x99, 0x87, 0xb8, 0x24, + 0x47, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -158,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 
0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -191,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -233,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -307,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -353,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -473,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -553,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -575,41 +1002,52 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,91 +1055,94 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + 
keysForExtra = append(keysForExtra, string(k)) } - i += n15 - } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } - i += n16 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) if len(m.Groups) > 0 { - for _, s := range m.Groups { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 
- i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -751,59 +1199,74 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -816,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -828,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n 
int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -838,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -856,6 +1328,9 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -876,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -906,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -918,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -932,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -944,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -952,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -964,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -997,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1009,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1030,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1047,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1110,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1122,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1133,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1155,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1177,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, 
`Extra:` + mapStringForExtra + `,`, @@ -1204,9 +1699,19 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1236,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1264,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1274,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1288,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1315,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1343,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1352,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1373,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1382,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1403,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -1412,6 +1929,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1428,6 +1948,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1455,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1483,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1493,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1512,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1522,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1536,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1563,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1591,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1601,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1620,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1630,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1644,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1671,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } 
@@ -1699,7 +2240,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1709,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1728,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1738,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1757,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1767,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1786,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1796,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1815,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1825,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1844,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1854,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1873,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1883,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1897,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -1924,7 +2489,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1952,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1962,6 +2527,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1981,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1991,6 +2559,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2010,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2020,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2039,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2049,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2063,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2090,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2118,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2127,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2148,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2157,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2178,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2187,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2203,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2230,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2258,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2267,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2291,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2300,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2319,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2346,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2374,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2383,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2404,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2413,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2434,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2443,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2459,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2486,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2514,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2524,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2538,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2565,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2593,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2602,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2623,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2632,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2653,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2662,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2678,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2705,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2733,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2742,6 
+3373,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2766,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2775,6 +3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2799,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2809,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2828,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2838,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2857,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2866,6 +3509,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2886,7 +3532,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2903,7 +3549,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2913,6 +3559,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2929,7 +3578,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2938,7 +3587,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2980,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b 
< 0x80 { break } @@ -2990,6 +3639,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3004,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3031,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3059,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3079,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3089,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3108,7 +3766,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3137,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3152,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3179,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3207,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3216,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3238,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3247,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3269,7 +3939,7 @@ func (m 
*SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3289,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3299,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3313,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3379,10 +4055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3411,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3429,83 +4111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1140 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xff, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xdb, 0xd1, - 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, - 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, - 0xc2, 0xa9, 0x12, 0x5f, 0x80, 0x23, 0x07, 0x0e, 0x7c, 0x03, 0x2e, 0x48, 0xdc, 0x38, 0x70, 0x40, - 0x39, 0xf6, 0x58, 0x24, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, - 0x9b, 0x70, 0xa0, 0x97, 0xde, 0x76, 0x9f, 0xdf, 0xef, 0x79, 0x99, 0xe7, 0x65, 0xe6, 0x01, 0xdb, - 0x47, 0x9b, 0xcc, 0x72, 0x7d, 0xfb, 0x28, 0x6c, 0x12, 0xea, 0x11, 0x4e, 0x98, 0xdd, 0x27, 0x5e, - 0xcb, 0xa7, 0xb6, 0x02, 0x70, 0xe0, 0xda, 0x38, 0xe4, 0x1d, 0x9f, 0xba, 0x5f, 0x63, 0xee, 0xfa, - 0x9e, 0xdd, 0x5f, 0xb7, 0xdb, 0xc4, 0x23, 0x14, 0x73, 0xd2, 0xb2, 0x02, 0xea, 0x73, 0x1f, 0xde, - 0x8a, 0xc9, 0x16, 0x0e, 0x5c, 0x6b, 0x8c, 0x6c, 0xf5, 0xd7, 0x57, 0xde, 0x6b, 0xbb, 0xbc, 0x13, - 0x36, 0x2d, 0xc7, 0xef, 0xd9, 0x6d, 0xbf, 0xed, 0xdb, 0x52, 0xa7, 0x19, 0x1e, 0xca, 0x3f, 0xf9, - 0x23, 0xbf, 0x62, 0x5b, 0x2b, 0x77, 0x13, 0xc7, 0x3d, 0xec, 0x74, 0x5c, 0x8f, 0xd0, 0x13, 0x3b, - 0x38, 0x6a, 0x0b, 0x01, 0xb3, 0x7b, 0x84, 0xe3, 0x8c, 0x08, 0x56, 0xec, 0x49, 0x5a, 0x34, 0xf4, - 0xb8, 0xdb, 0x23, 0x17, 0x14, 0xee, 0xbf, 0x4a, 0x81, 0x39, 0x1d, 0xd2, 0xc3, 0xe7, 0xf5, 0xcc, - 0x0d, 0x00, 0x76, 0xbe, 0xe2, 0x14, 0x1f, 0xe0, 0x6e, 0x48, 0x60, 0x0d, 0x14, 0x5d, 0x4e, 0x7a, - 0xcc, 0xd0, 0x56, 0xf3, 0x6b, 0xe5, 0x46, 0x39, 0x1a, 0xd4, 0x8a, 0xbb, 0x42, 0x80, 0x62, 0xf9, - 0x83, 0xd2, 0x77, 0x3f, 0xd4, 0x72, 0xcf, 0xff, 0x58, 0xcd, 0x99, 0x3f, 0xe9, 0xc0, 0x78, 
0xe4, - 0x3b, 0xb8, 0xbb, 0x17, 0x36, 0xbf, 0x20, 0x0e, 0xdf, 0x72, 0x1c, 0xc2, 0x18, 0x22, 0x7d, 0x97, - 0x1c, 0xc3, 0xcf, 0x41, 0x49, 0x9c, 0xac, 0x85, 0x39, 0x36, 0xb4, 0x55, 0x6d, 0xad, 0x52, 0x7f, - 0xdf, 0x4a, 0x72, 0x3a, 0x0a, 0xd0, 0x0a, 0x8e, 0xda, 0x42, 0xc0, 0x2c, 0xc1, 0xb6, 0xfa, 0xeb, - 0xd6, 0x47, 0xd2, 0xd6, 0x63, 0xc2, 0x71, 0x03, 0x9e, 0x0e, 0x6a, 0xb9, 0x68, 0x50, 0x03, 0x89, - 0x0c, 0x8d, 0xac, 0xc2, 0x03, 0x50, 0x60, 0x01, 0x71, 0x0c, 0x5d, 0x5a, 0xbf, 0x6b, 0x4d, 0xa9, - 0x98, 0x95, 0x11, 0xe1, 0x5e, 0x40, 0x9c, 0xc6, 0x35, 0xe5, 0xa1, 0x20, 0xfe, 0x90, 0xb4, 0x07, - 0x3f, 0x03, 0x33, 0x8c, 0x63, 0x1e, 0x32, 0x23, 0x2f, 0x2d, 0xdf, 0xbf, 0xb2, 0x65, 0xa9, 0xdd, - 0xf8, 0x9f, 0xb2, 0x3d, 0x13, 0xff, 0x23, 0x65, 0xd5, 0xfc, 0x04, 0x2c, 0x3d, 0xf1, 0x3d, 0x44, - 0x98, 0x1f, 0x52, 0x87, 0x6c, 0x71, 0x4e, 0xdd, 0x66, 0xc8, 0x09, 0x83, 0xab, 0xa0, 0x10, 0x60, - 0xde, 0x91, 0xe9, 0x2a, 0x27, 0xa1, 0x3d, 0xc5, 0xbc, 0x83, 0x24, 0x22, 0x18, 0x7d, 0x42, 0x9b, - 0xf2, 0xc8, 0x29, 0xc6, 0x01, 0xa1, 0x4d, 0x24, 0x11, 0xf3, 0x4b, 0x30, 0x9f, 0x32, 0x8e, 0xc2, - 0xae, 0xac, 0xa8, 0x80, 0xc6, 0x2a, 0x2a, 0x34, 0x18, 0x8a, 0xe5, 0xf0, 0x21, 0x98, 0xf7, 0x12, - 0x9d, 0x7d, 0xf4, 0x88, 0x19, 0xba, 0xa4, 0x2e, 0x46, 0x83, 0x5a, 0xda, 0x9c, 0x80, 0xd0, 0x79, - 0xae, 0xf9, 0x8b, 0x0e, 0x60, 0xc6, 0x69, 0x6c, 0x50, 0xf6, 0x70, 0x8f, 0xb0, 0x00, 0x3b, 0x44, - 0x1d, 0xe9, 0xba, 0x0a, 0xb8, 0xfc, 0x64, 0x08, 0xa0, 0x84, 0xf3, 0xea, 0xc3, 0xc1, 0xb7, 0x40, - 0xb1, 0x4d, 0xfd, 0x30, 0x90, 0x85, 0x29, 0x37, 0xe6, 0x14, 0xa5, 0xf8, 0xa1, 0x10, 0xa2, 0x18, - 0x83, 0xb7, 0xc1, 0x6c, 0x9f, 0x50, 0xe6, 0xfa, 0x9e, 0x51, 0x90, 0xb4, 0x79, 0x45, 0x9b, 0x3d, - 0x88, 0xc5, 0x68, 0x88, 0xc3, 0x3b, 0xa0, 0x44, 0x55, 0xe0, 0x46, 0x51, 0x72, 0x17, 0x14, 0xb7, - 0x34, 0xca, 0xe0, 0x88, 0x01, 0xef, 0x81, 0x0a, 0x0b, 0x9b, 0x23, 0x85, 0x19, 0xa9, 0xb0, 0xa8, - 0x14, 0x2a, 0x7b, 0x09, 0x84, 0xd2, 0x3c, 0x71, 0x2c, 0x71, 0x46, 0x63, 0x76, 0xfc, 0x58, 0x22, - 0x05, 0x48, 0x22, 0xe6, 0xaf, 0x1a, 0xb8, 0x76, 0xb5, 0x8a, 0xbd, 0x0b, 0xca, 0x38, 0x70, 0xe5, - 0xb1, 0x87, 0xb5, 0x9a, 0x13, 0x79, 0xdd, 0x7a, 0xba, 0x1b, 0x0b, 0x51, 0x82, 0x0b, 0xf2, 0x30, - 0x18, 0xd1, 0xd2, 0x23, 0xf2, 0xd0, 0x25, 0x43, 0x09, 0x0e, 0x37, 0xc0, 0xdc, 0xf0, 0x47, 0x16, - 0xc9, 0x28, 0x48, 0x85, 0xeb, 0xd1, 0xa0, 0x36, 0x87, 0xd2, 0x00, 0x1a, 0xe7, 0x99, 0x3f, 0xeb, - 0x60, 0x79, 0x8f, 0x74, 0x0f, 0x5f, 0xcf, 0x5d, 0xf0, 0x6c, 0xec, 0x2e, 0xd8, 0x9c, 0x3e, 0xb1, - 0xd9, 0x51, 0xbe, 0xb6, 0xfb, 0xe0, 0x7b, 0x1d, 0xdc, 0x9a, 0x12, 0x13, 0x3c, 0x06, 0x90, 0x5e, - 0x18, 0x2f, 0x95, 0x47, 0x7b, 0x6a, 0x2c, 0x17, 0xa7, 0xb2, 0x71, 0x23, 0x1a, 0xd4, 0x32, 0xa6, - 0x15, 0x65, 0xb8, 0x80, 0xdf, 0x68, 0x60, 0xc9, 0xcb, 0xba, 0xa9, 0x54, 0x9a, 0xeb, 0x53, 0x9d, - 0x67, 0xde, 0x71, 0x8d, 0x9b, 0xd1, 0xa0, 0x96, 0x7d, 0xfd, 0xa1, 0x6c, 0x5f, 0xe2, 0x95, 0xb9, - 0x91, 0x4a, 0x8f, 0x18, 0x90, 0xff, 0xae, 0xaf, 0x3e, 0x1e, 0xeb, 0xab, 0x8d, 0xcb, 0xf6, 0x55, - 0x2a, 0xc8, 0x89, 0x6d, 0xf5, 0xe9, 0xb9, 0xb6, 0xba, 0x77, 0x99, 0xb6, 0x4a, 0x1b, 0x9e, 0xde, - 0x55, 0x8f, 0xc1, 0xca, 0xe4, 0x80, 0xae, 0x7c, 0x39, 0x9b, 0x3f, 0xea, 0x60, 0xf1, 0xcd, 0x33, - 0x7f, 0x95, 0xb1, 0xfe, 0xad, 0x00, 0x96, 0xdf, 0x8c, 0xf4, 0xa4, 0x45, 0x27, 0x64, 0x84, 0xaa, - 0x67, 0x7c, 0x54, 0x9c, 0x7d, 0x46, 0x28, 0x92, 0x08, 0x34, 0xc1, 0x4c, 0x3b, 0x7e, 0xdd, 0xe2, - 0xf7, 0x07, 0x88, 0x04, 0xab, 0xa7, 0x4d, 0x21, 0xb0, 0x05, 0x8a, 0x44, 0xec, 0xad, 0x46, 0x71, - 0x35, 0xbf, 0x56, 0xa9, 0x7f, 0xf0, 0x6f, 0x3a, 0xc3, 0x92, 0x9b, 0xef, 0x8e, 0xc7, 0xe9, 0x49, - 0xb2, 0x4e, 0x48, 
0x19, 0x8a, 0x8d, 0xc3, 0xff, 0x83, 0x7c, 0xe8, 0xb6, 0xd4, 0x6b, 0x5f, 0x51, - 0x94, 0xfc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x15, 0xac, 0x96, 0x67, 0x69, 0x02, 0x2e, 0x80, 0xfc, - 0x11, 0x39, 0x89, 0x07, 0x0a, 0x89, 0x4f, 0xf8, 0x10, 0x14, 0xfb, 0x62, 0xaf, 0x56, 0xf9, 0x7d, - 0x67, 0x6a, 0x90, 0xc9, 0x1a, 0x8e, 0x62, 0xad, 0x07, 0xfa, 0xa6, 0x66, 0xfe, 0xae, 0x81, 0x9b, - 0x13, 0xdb, 0x4f, 0xac, 0x3b, 0xb8, 0xdb, 0xf5, 0x8f, 0x49, 0x4b, 0xba, 0x2d, 0x25, 0xeb, 0xce, - 0x56, 0x2c, 0x46, 0x43, 0x1c, 0xbe, 0x0d, 0x66, 0x28, 0xc1, 0xcc, 0xf7, 0xd4, 0x8a, 0x35, 0xea, - 0x5c, 0x24, 0xa5, 0x48, 0xa1, 0x70, 0x0b, 0xcc, 0x13, 0xe1, 0x5e, 0xc6, 0xb5, 0x43, 0xa9, 0x3f, - 0xac, 0xd4, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0xbe, 0x70, 0xd5, 0x22, 0x9e, 0x4b, - 0x5a, 0x72, 0x07, 0x2b, 0x25, 0xae, 0xb6, 0xa5, 0x14, 0x29, 0xd4, 0xfc, 0x5b, 0x07, 0xc6, 0xa4, - 0xab, 0x0d, 0x1e, 0x26, 0xbb, 0x88, 0x04, 0xe5, 0x3a, 0x54, 0xa9, 0xdf, 0xbe, 0xd4, 0x80, 0x08, - 0x8d, 0xc6, 0x92, 0x72, 0x3b, 0x97, 0x96, 0xa6, 0x56, 0x17, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, - 0x77, 0xe6, 0x78, 0xa9, 0xaa, 0xd4, 0xef, 0x5c, 0x76, 0x1c, 0xa4, 0x37, 0x43, 0x79, 0x5b, 0x38, - 0x07, 0x30, 0x74, 0xc1, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, - 0xde, 0x52, 0x72, 0x0f, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xd5, 0xa5, 0x70, 0xb5, 0xba, 0x34, - 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, - 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, - 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x85, 0x45, 0x74, - 0x47, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 7cce98eb..f0def20b 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -17,40 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -63,74 +44,398 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_43130d8376f09103, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var 
xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_43130d8376f09103, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_43130d8376f09103, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_43130d8376f09103, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_43130d8376f09103, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) } +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_43130d8376f09103, []int{13} +} 
+func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) } +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") @@ -144,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) +} + +var fileDescriptor_43130d8376f09103 = []byte{ + // 1141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, + 0x05, 0xd1, 0xee, 0x92, 0xa8, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, 0x44, + 0xc9, 0x81, 0x4a, 0xc0, 0xec, 0x7a, 0x62, 0x2f, 0xb6, 0x77, 0x97, 0x99, 0x59, 0x87, 0x20, 0x0e, + 0x3d, 0x72, 0xe4, 0xc8, 0x91, 0x13, 0xdf, 0x81, 0x0b, 0x12, 0x9c, 0x72, 0xec, 0x31, 0x48, 0xc8, + 0x22, 0xcb, 0x87, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, + 0xdb, 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0xde, 0x9b, 0xf7, 0xde, 0x3e, 0xb0, 0xdb, 0x79, 0xc8, 0x0c, + 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, + 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, + 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, + 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xfd, 0x96, 0xc3, 0xdb, 0x81, + 0x65, 0xd8, 0x5e, 0xcf, 0x6c, 0x79, 0x2d, 0xcf, 0x94, 0x8a, 0x56, 0x70, 0x28, 0x4f, 0xf2, 0x20, + 0xbf, 0x22, 0x83, 0x2b, 0x0f, 0x62, 0x17, 0x7a, 0xd8, 0x6e, 0x3b, 0x2e, 0xa1, 0xc7, 0xa6, 0xdf, + 0x69, 0x09, 0x01, 0x33, 0x7b, 0x84, 0x63, 0xb3, 0x7f, 0xc1, 0x8d, 0x15, 0xf3, 0x32, 0x2d, 0x1a, + 0xb8, 0xdc, 0xe9, 0x91, 0x0b, 0x0a, 0x1f, 0x4e, 0x52, 0x60, 0x76, 0x9b, 0xf4, 0xf0, 0x79, 0xbd, + 0xfa, 0x06, 0x00, 0x3b, 0xdf, 0x72, 0x8a, 0x0f, 0x70, 0x37, 0x20, 0xb0, 0x0a, 0x0a, 0x0e, 0x27, + 0x3d, 0xa6, 0x6b, 0xb5, 0xdc, 0x6a, 0xa9, 0x51, 0x0a, 0x07, 0xd5, 0xc2, 0xae, 0x10, 
0xa0, 0x48, + 0xbe, 0x59, 0xfc, 0xe9, 0xe7, 0x6a, 0xe6, 0xc5, 0x5f, 0xb5, 0x4c, 0xfd, 0xb7, 0x2c, 0xd0, 0x1f, + 0x7b, 0x36, 0xee, 0xee, 0x05, 0xd6, 0xd7, 0xc4, 0xe6, 0x5b, 0xb6, 0x4d, 0x18, 0x43, 0xa4, 0xef, + 0x90, 0x23, 0xf8, 0x15, 0x28, 0x8a, 0xc8, 0x9a, 0x98, 0x63, 0x5d, 0xab, 0x69, 0xab, 0xe5, 0xf5, + 0xf7, 0x8d, 0x38, 0xb1, 0x23, 0x07, 0x0d, 0xbf, 0xd3, 0x12, 0x02, 0x66, 0x08, 0xb6, 0xd1, 0x5f, + 0x33, 0x3e, 0x93, 0xb6, 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, + 0x65, 0x68, 0x64, 0x15, 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xeb, 0x1f, 0x19, 0x93, + 0x9e, 0xcd, 0x48, 0x71, 0x73, 0xcf, 0x27, 0x76, 0xe3, 0x96, 0xba, 0x26, 0x2f, 0x4e, 0x48, 0x1a, + 0x85, 0x36, 0x98, 0x61, 0x1c, 0xf3, 0x80, 0xe9, 0x39, 0x69, 0xfe, 0xe3, 0x9b, 0x99, 0x97, 0x26, + 0x1a, 0x6f, 0xa8, 0x0b, 0x66, 0xa2, 0x33, 0x52, 0xa6, 0xeb, 0xcf, 0xc1, 0xd2, 0x53, 0xcf, 0x45, + 0x84, 0x79, 0x01, 0xb5, 0xc9, 0x16, 0xe7, 0xd4, 0xb1, 0x02, 0x4e, 0x18, 0xac, 0x81, 0xbc, 0x8f, + 0x79, 0x5b, 0x26, 0xae, 0x14, 0xfb, 0xf7, 0x0c, 0xf3, 0x36, 0x92, 0x88, 0x60, 0xf4, 0x09, 0xb5, + 0x64, 0xf0, 0x09, 0xc6, 0x01, 0xa1, 0x16, 0x92, 0x48, 0xfd, 0x1b, 0x30, 0x9f, 0x30, 0x8e, 0x82, + 0xae, 0x7c, 0x5b, 0x01, 0x8d, 0xbd, 0xad, 0xd0, 0x60, 0x28, 0x92, 0xc3, 0x47, 0x60, 0xde, 0x8d, + 0x75, 0xf6, 0xd1, 0x63, 0xa6, 0x67, 0x25, 0x75, 0x31, 0x1c, 0x54, 0x93, 0xe6, 0x04, 0x84, 0xce, + 0x73, 0x45, 0x41, 0xc0, 0x94, 0x68, 0x4c, 0x50, 0x72, 0x71, 0x8f, 0x30, 0x1f, 0xdb, 0x44, 0x85, + 0x74, 0x5b, 0x39, 0x5c, 0x7a, 0x3a, 0x04, 0x50, 0xcc, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, + 0xd4, 0x0b, 0x7c, 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0xa2, 0x14, 0x3e, 0x15, 0x42, 0x14, 0x61, 0xf0, + 0x5d, 0x30, 0xdb, 0x27, 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0x6d, 0x5e, 0xd1, 0x66, 0x0f, 0x22, + 0x31, 0x1a, 0xe2, 0xf0, 0x1e, 0x28, 0x52, 0xe5, 0xb8, 0x5e, 0x90, 0xdc, 0x05, 0xc5, 0x2d, 0x8e, + 0x32, 0x38, 0x62, 0xc0, 0x0f, 0x40, 0x99, 0x05, 0xd6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, + 0xf2, 0x5e, 0x0c, 0xa1, 0x24, 0x4f, 0x84, 0x25, 0x62, 0xd4, 0x67, 0xc7, 0xc3, 0x12, 0x29, 0x40, + 0x12, 0xa9, 0xff, 0xa1, 0x81, 0x5b, 0xd3, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x86, 0x3d, + 0x7c, 0xab, 0x39, 0x91, 0xd7, 0xad, 0x67, 0xbb, 0x91, 0x10, 0xc5, 0xb8, 0x20, 0x0f, 0x9d, 0x11, + 0x75, 0x3d, 0x22, 0x0f, 0xaf, 0x64, 0x28, 0xc6, 0xe1, 0x06, 0x98, 0x1b, 0x1e, 0xe4, 0x23, 0xe9, + 0x79, 0xa9, 0x70, 0x3b, 0x1c, 0x54, 0xe7, 0x50, 0x12, 0x40, 0xe3, 0xbc, 0xfa, 0xef, 0x59, 0xb0, + 0xbc, 0x47, 0xba, 0x87, 0xaf, 0x66, 0x2a, 0x7c, 0x39, 0x36, 0x15, 0x1e, 0x5d, 0xa3, 0x6d, 0xd3, + 0x5d, 0x7d, 0xb5, 0x93, 0xe1, 0x97, 0x2c, 0x78, 0xf3, 0x0a, 0xc7, 0xe0, 0xf7, 0x00, 0xd2, 0x0b, + 0x8d, 0xa6, 0x32, 0xfa, 0x60, 0xb2, 0x43, 0x17, 0x9b, 0xb4, 0x71, 0x27, 0x1c, 0x54, 0x53, 0x9a, + 0x17, 0xa5, 0xdc, 0x03, 0x7f, 0xd0, 0xc0, 0x92, 0x9b, 0x36, 0xb8, 0x54, 0xd6, 0x37, 0x26, 0x7b, + 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xa2, 0xf4, 0x0b, 0xc5, 0xc8, 0xb9, + 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xaf, 0xd6, 0xbe, 0x18, 0xab, 0xb5, 0x4f, 0xa6, 0xaa, 0xb5, + 0x84, 0xa7, 0x97, 0x96, 0x9a, 0x75, 0xae, 0xd4, 0x36, 0xaf, 0x5d, 0x6a, 0x49, 0xeb, 0x57, 0x57, + 0xda, 0x13, 0xb0, 0x72, 0xb9, 0x57, 0x53, 0x8f, 0xee, 0xfa, 0xaf, 0x59, 0xb0, 0xf8, 0x7a, 0x1d, + 0xb8, 0x59, 0xd3, 0x9f, 0xe6, 0xc1, 0xf2, 0xeb, 0x86, 0xbf, 0xba, 0xe1, 0xc5, 0x4f, 0x34, 0x60, + 0x84, 0xaa, 0x1f, 0xff, 0xe8, 0xad, 0xf6, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x36, 0xdc, 0x0d, 0xa2, + 0x1f, 0x16, 0x10, 0x99, 0x56, 0xff, 0x42, 0xb5, 0x18, 0x38, 0xa0, 0x40, 0xc4, 0xc6, 0xab, 0x17, + 0x6a, 0xb9, 
0xd5, 0xf2, 0xfa, 0xf6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, 0x8e, 0xcb, 0xe9, 0x71, + 0xbc, 0x83, 0x48, 0x19, 0x8a, 0x6e, 0x80, 0x6f, 0x81, 0x5c, 0xe0, 0x34, 0xd5, 0x8a, 0x50, 0x56, + 0x94, 0xdc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, + 0x3a, 0xe4, 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0xfa, 0x62, 0x2d, 0x57, 0x79, 0xbe, + 0x37, 0xd9, 0xd3, 0x78, 0x95, 0x47, 0x91, 0xea, 0x66, 0xf6, 0xa1, 0x56, 0xff, 0x53, 0x03, 0x77, + 0x2f, 0x2d, 0x48, 0xb1, 0x28, 0xe1, 0x6e, 0xd7, 0x3b, 0x22, 0x4d, 0x79, 0x77, 0x31, 0x5e, 0x94, + 0xb6, 0x22, 0x31, 0x1a, 0xe2, 0xf0, 0x1d, 0x30, 0xd3, 0x24, 0xae, 0x43, 0x9a, 0x72, 0xa5, 0x2a, + 0xc6, 0xb5, 0xbc, 0x2d, 0xa5, 0x48, 0xa1, 0x82, 0x47, 0x09, 0x66, 0x9e, 0xab, 0x96, 0xb8, 0x11, + 0x0f, 0x49, 0x29, 0x52, 0x28, 0xdc, 0x02, 0xf3, 0x44, 0xb8, 0x29, 0x83, 0xd8, 0xa1, 0xd4, 0x1b, + 0xbe, 0xec, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0x7e, 0xfd, 0xdf, 0x2c, 0xd0, 0x2f, + 0x1b, 0x7b, 0xb0, 0x13, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xdd, 0xb8, 0x7e, 0xcb, 0x08, + 0xb5, 0xc6, 0x92, 0xf2, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, + 0xbe, 0x72, 0x47, 0x3b, 0x59, 0x79, 0x7d, 0x6d, 0xaa, 0x06, 0x91, 0x57, 0xea, 0xea, 0xca, 0x85, + 0x73, 0x00, 0x43, 0x17, 0x2e, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, + 0x26, 0xb0, 0x18, 0x4f, 0xcb, 0xdd, 0x11, 0x82, 0x12, 0xac, 0xb4, 0xcc, 0xe7, 0xa7, 0xcb, 0x7c, + 0xe3, 0xfe, 0xc9, 0x59, 0x25, 0xf3, 0xf2, 0xac, 0x92, 0x39, 0x3d, 0xab, 0x64, 0x5e, 0x84, 0x15, + 0xed, 0x24, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x1a, 0x56, 0xb4, 0xbf, 0xc3, 0x8a, 0xf6, 0xe3, + 0x3f, 0x95, 0xcc, 0xe7, 0xb3, 0x2a, 0xc2, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc9, 0xa5, + 0x34, 0xa4, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -158,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -191,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -233,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -307,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -353,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - 
for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -473,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -553,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -575,133 +1002,147 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n15 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n16 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize 
:= 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -751,59 +1199,74 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -816,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -828,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -838,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -856,6 +1328,9 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -876,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -906,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -918,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m 
*SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -932,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -944,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -952,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -964,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -997,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1009,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1030,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1047,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1110,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1122,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", 
"NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1133,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1155,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1177,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1204,9 +1699,19 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - 
`NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1236,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1264,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1274,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1288,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1315,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1343,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1352,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1373,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1382,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1403,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1412,6 +1929,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1428,6 +1948,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1455,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1483,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1493,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1512,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1522,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1536,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1563,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1591,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1601,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1620,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1630,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1644,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1671,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1699,7 +2240,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1709,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1728,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1738,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1757,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1767,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1786,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1796,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1815,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1825,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1844,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1854,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1873,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1883,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1897,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1924,7 +2489,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1952,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1962,6 +2527,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1981,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1991,6 +2559,9 @@ func (m *ResourceRule) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2010,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2020,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2039,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2049,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2063,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2090,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2118,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2127,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2148,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2157,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2178,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2187,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2203,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2230,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2258,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2267,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2291,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2300,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2319,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2346,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2374,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2383,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2404,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2413,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2434,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2443,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2459,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2486,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2514,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2524,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2538,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2565,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2593,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2602,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2623,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2632,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2653,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2662,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2678,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2705,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2733,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2742,6 +3373,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2766,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2775,6 +3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2799,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-2809,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2828,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2838,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2857,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2866,6 +3509,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2886,7 +3532,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2903,7 +3549,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2913,6 +3559,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2929,7 +3578,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2938,7 +3587,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2980,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2990,6 +3639,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3004,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3031,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3059,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b 
< 0x80 { break } @@ -3079,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3089,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3108,7 +3766,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3137,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3152,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3179,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3207,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3216,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3238,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3247,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3269,7 +3939,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3289,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3299,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3313,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3379,10 +4055,13 
@@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3411,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3429,83 +4111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcb, 0x6f, 0x1b, 0x45, - 0x18, 0xf7, 0xfa, 0x91, 0xd8, 0xe3, 0x86, 0xa4, 0x13, 0xa5, 0xd9, 0x06, 0x61, 0x5b, 0x46, 0x42, - 0x41, 0xb4, 0xbb, 0x24, 0x2a, 0xa4, 0x04, 0x7a, 0x88, 0x95, 0x08, 0x45, 0x6a, 0x4b, 0x35, 0x51, - 0x72, 0xa0, 0x12, 0x30, 0xbb, 0x9e, 0xd8, 0x8b, 0xed, 0xdd, 0x65, 0x66, 0xd6, 0x21, 0x88, 0x43, - 0x8f, 0x1c, 0x39, 0x72, 0xe4, 0xc4, 0xff, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, 0x64, - 0x91, 0xe5, 0x8f, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, - 0xdb, 0xf9, 0x7e, 0xdf, 0xfb, 0xb5, 0x1f, 0xd8, 0xed, 0x3c, 0x64, 0x86, 0xe3, 0x99, 0x9d, 0xc0, - 0x22, 0xd4, 0x25, 0x9c, 0x30, 0xb3, 0x4f, 0xdc, 0xa6, 0x47, 0x4d, 0x05, 0x60, 0xdf, 0x31, 0x71, - 0xc0, 0xdb, 0x1e, 0x75, 0xbe, 0xc3, 0xdc, 0xf1, 0x5c, 0xb3, 0xbf, 0x66, 0x11, 0x8e, 0xd7, 0xcc, - 0x16, 0x71, 0x09, 0xc5, 0x9c, 0x34, 0x0d, 0x9f, 0x7a, 0xdc, 0x83, 0xb5, 0x48, 0xc2, 0xc0, 0xbe, - 0x63, 0x8c, 0x49, 0x18, 0x4a, 0x62, 0xe5, 0x7e, 0xcb, 0xe1, 0xed, 0xc0, 0x32, 0x6c, 0xaf, 0x67, - 0xb6, 0xbc, 0x96, 0x67, 0x4a, 0x41, 0x2b, 0x38, 0x94, 0x2f, 0xf9, 0x90, 0x5f, 0x91, 0xc2, 0x95, - 0x07, 0xb1, 0x0b, 0x3d, 0x6c, 0xb7, 0x1d, 0x97, 0xd0, 0x63, 0xd3, 0xef, 0xb4, 0x04, 0x81, 0x99, - 0x3d, 0xc2, 0xb1, 0xd9, 0xbf, 0xe0, 0xc6, 0x8a, 0x79, 0x99, 0x14, 0x0d, 0x5c, 0xee, 0xf4, 0xc8, - 0x05, 0x81, 0x0f, 0x27, 0x09, 0x30, 0xbb, 0x4d, 0x7a, 0xf8, 0xbc, 0x5c, 0x7d, 0x03, 0x80, 0x9d, - 0x6f, 0x39, 0xc5, 0x07, 0xb8, 0x1b, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x1e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0x83, 0x6a, 0x61, 0x57, 0x10, 0x50, 0x44, 0xdf, 0x2c, 0xfe, 0xf4, - 0x73, 0x35, 0xf3, 0xe2, 0xaf, 0x5a, 0xa6, 0xfe, 0x5b, 0x16, 0xe8, 0x8f, 0x3d, 0x1b, 0x77, 0xf7, - 0x02, 0xeb, 0x6b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x77, 0xc8, 0x11, 0xfc, 0x0a, - 0x14, 0x45, 0x64, 0x4d, 0xcc, 0xb1, 0xae, 0xd5, 0xb4, 0xd5, 0xf2, 0xfa, 0xfb, 0x46, 0x9c, 0xd8, - 0x91, 0x83, 0x86, 0xdf, 0x69, 0x09, 0x02, 0x33, 0x04, 0xb7, 0xd1, 0x5f, 0x33, 0x3e, 0x93, 0xba, - 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, 0x69, 0x68, 0xa4, 0x15, - 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xed, 0x1f, 0x19, 0x93, 0xca, 0x66, 0xa4, 0xb8, - 0xb9, 0xe7, 0x13, 0xbb, 0x71, 0x4b, 0x99, 0xc9, 0x8b, 0x17, 0x92, 0x4a, 0xa1, 0x0d, 0x66, 0x18, - 0xc7, 0x3c, 0x60, 0x7a, 0x4e, 0xaa, 0xff, 0xf8, 0x66, 0xea, 0xa5, 0x8a, 0xc6, 0x1b, 0xca, 0xc0, - 0x4c, 0xf4, 0x46, 0x4a, 0x75, 0xfd, 0x39, 0x58, 0x7a, 0xea, 0xb9, 0x88, 0x30, 0x2f, 0xa0, 0x36, - 0xd9, 0xe2, 0x9c, 0x3a, 0x56, 0xc0, 0x09, 0x83, 0x35, 0x90, 0xf7, 0x31, 0x6f, 0xcb, 0xc4, 0x95, - 0x62, 0xff, 0x9e, 0x61, 0xde, 0x46, 0x12, 
0x11, 0x1c, 0x7d, 0x42, 0x2d, 0x19, 0x7c, 0x82, 0xe3, - 0x80, 0x50, 0x0b, 0x49, 0xa4, 0xfe, 0x0d, 0x98, 0x4f, 0x28, 0x47, 0x41, 0x57, 0xd6, 0x56, 0x40, - 0x63, 0xb5, 0x15, 0x12, 0x0c, 0x45, 0x74, 0xf8, 0x08, 0xcc, 0xbb, 0xb1, 0xcc, 0x3e, 0x7a, 0xcc, - 0xf4, 0xac, 0x64, 0x5d, 0x0c, 0x07, 0xd5, 0xa4, 0x3a, 0x01, 0xa1, 0xf3, 0xbc, 0xa2, 0x21, 0x60, - 0x4a, 0x34, 0x26, 0x28, 0xb9, 0xb8, 0x47, 0x98, 0x8f, 0x6d, 0xa2, 0x42, 0xba, 0xad, 0x1c, 0x2e, - 0x3d, 0x1d, 0x02, 0x28, 0xe6, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, 0xd4, 0x0b, 0x7c, 0x59, - 0x9d, 0x52, 0x63, 0x4e, 0xb1, 0x14, 0x3e, 0x15, 0x44, 0x14, 0x61, 0xf0, 0x5d, 0x30, 0xdb, 0x27, - 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0xdb, 0xbc, 0x62, 0x9b, 0x3d, 0x88, 0xc8, 0x68, 0x88, 0xc3, - 0x7b, 0xa0, 0x48, 0x95, 0xe3, 0x7a, 0x41, 0xf2, 0x2e, 0x28, 0xde, 0xe2, 0x28, 0x83, 0x23, 0x0e, - 0xf8, 0x01, 0x28, 0xb3, 0xc0, 0x1a, 0x09, 0xcc, 0x48, 0x81, 0x45, 0x25, 0x50, 0xde, 0x8b, 0x21, - 0x94, 0xe4, 0x13, 0x61, 0x89, 0x18, 0xf5, 0xd9, 0xf1, 0xb0, 0x44, 0x0a, 0x90, 0x44, 0xea, 0x7f, - 0x68, 0xe0, 0xd6, 0x74, 0x15, 0x7b, 0x0f, 0x94, 0xb0, 0xef, 0xc8, 0xb0, 0x87, 0xb5, 0x9a, 0x13, - 0x79, 0xdd, 0x7a, 0xb6, 0x1b, 0x11, 0x51, 0x8c, 0x0b, 0xe6, 0xa1, 0x33, 0xa2, 0xaf, 0x47, 0xcc, - 0x43, 0x93, 0x0c, 0xc5, 0x38, 0xdc, 0x00, 0x73, 0xc3, 0x87, 0x2c, 0x92, 0x9e, 0x97, 0x02, 0xb7, - 0xc3, 0x41, 0x75, 0x0e, 0x25, 0x01, 0x34, 0xce, 0x57, 0xff, 0x3d, 0x0b, 0x96, 0xf7, 0x48, 0xf7, - 0xf0, 0xd5, 0x6c, 0x85, 0x2f, 0xc7, 0xb6, 0xc2, 0xa3, 0x6b, 0x8c, 0x6d, 0xba, 0xab, 0xaf, 0x76, - 0x33, 0xfc, 0x92, 0x05, 0x6f, 0x5e, 0xe1, 0x18, 0xfc, 0x1e, 0x40, 0x7a, 0x61, 0xd0, 0x54, 0x46, - 0x1f, 0x4c, 0x76, 0xe8, 0xe2, 0x90, 0x36, 0xee, 0x84, 0x83, 0x6a, 0xca, 0xf0, 0xa2, 0x14, 0x3b, - 0xf0, 0x07, 0x0d, 0x2c, 0xb9, 0x69, 0x8b, 0x4b, 0x65, 0x7d, 0x63, 0xb2, 0x07, 0xa9, 0x7b, 0xaf, - 0x71, 0x37, 0x1c, 0x54, 0xd3, 0x57, 0x22, 0x4a, 0x37, 0x28, 0x56, 0xce, 0x9d, 0x44, 0xa2, 0xc4, - 0xd0, 0xfc, 0x7f, 0xbd, 0xf6, 0xc5, 0x58, 0xaf, 0x7d, 0x32, 0x55, 0xaf, 0x25, 0x3c, 0xbd, 0xb4, - 0xd5, 0xac, 0x73, 0xad, 0xb6, 0x79, 0xed, 0x56, 0x4b, 0x6a, 0xbf, 0xba, 0xd3, 0x9e, 0x80, 0x95, - 0xcb, 0xbd, 0x9a, 0x7a, 0x75, 0xd7, 0x7f, 0xcd, 0x82, 0xc5, 0xd7, 0xe7, 0xc0, 0xcd, 0x86, 0xfe, - 0x34, 0x0f, 0x96, 0x5f, 0x0f, 0xfc, 0xd5, 0x03, 0x2f, 0x7e, 0xa2, 0x01, 0x23, 0x54, 0xfd, 0xf8, - 0x47, 0xb5, 0xda, 0x67, 0x84, 0x22, 0x89, 0xc0, 0xda, 0xf0, 0x36, 0x88, 0x7e, 0x58, 0x40, 0x64, - 0x5a, 0xfd, 0x0b, 0xd5, 0x61, 0xe0, 0x80, 0x02, 0x11, 0x17, 0xaf, 0x5e, 0xa8, 0xe5, 0x56, 0xcb, - 0xeb, 0xdb, 0x37, 0xee, 0x15, 0x43, 0x1e, 0xce, 0x3b, 0x2e, 0xa7, 0xc7, 0xf1, 0x0d, 0x22, 0x69, - 0x28, 0xb2, 0x00, 0xdf, 0x02, 0xb9, 0xc0, 0x69, 0xaa, 0x13, 0xa1, 0xac, 0x58, 0x72, 0xfb, 0xbb, - 0xdb, 0x48, 0xd0, 0x57, 0x0e, 0xd5, 0xed, 0x2d, 0x55, 0xc0, 0x05, 0x90, 0xeb, 0x90, 0xe3, 0x68, - 0xce, 0x90, 0xf8, 0x84, 0x0d, 0x50, 0xe8, 0x8b, 0xb3, 0x5c, 0xe5, 0xf9, 0xde, 0x64, 0x4f, 0xe3, - 0x53, 0x1e, 0x45, 0xa2, 0x9b, 0xd9, 0x87, 0x5a, 0xfd, 0x4f, 0x0d, 0xdc, 0xbd, 0xb4, 0x21, 0xc5, - 0xa1, 0x84, 0xbb, 0x5d, 0xef, 0x88, 0x34, 0xa5, 0xed, 0x62, 0x7c, 0x28, 0x6d, 0x45, 0x64, 0x34, - 0xc4, 0xe1, 0x3b, 0x60, 0x86, 0x12, 0xcc, 0x3c, 0x57, 0x1d, 0x67, 0xa3, 0x5e, 0x46, 0x92, 0x8a, - 0x14, 0x0a, 0xb7, 0xc0, 0x3c, 0x11, 0xe6, 0xa5, 0x73, 0x3b, 0x94, 0x7a, 0xc3, 0x8a, 0x2d, 0x2b, - 0x81, 0xf9, 0x9d, 0x71, 0x18, 0x9d, 0xe7, 0x17, 0xa6, 0x9a, 0xc4, 0x75, 0x48, 0x53, 0x5e, 0x6f, - 0xc5, 0xd8, 0xd4, 0xb6, 0xa4, 0x22, 0x85, 0xd6, 0xff, 0xcd, 0x02, 0xfd, 0xb2, 0xb5, 0x07, 0x3b, - 0xf1, 0x15, 0x23, 0x41, 0x79, 0x48, 0x95, 0xd7, 0x8d, 0xeb, 0x8f, 
0x8c, 0x10, 0x6b, 0x2c, 0x29, - 0xdb, 0x73, 0x49, 0x6a, 0xe2, 0xf2, 0x91, 0x4f, 0x78, 0x04, 0x16, 0xdc, 0xf1, 0x93, 0x3b, 0xba, - 0xc9, 0xca, 0xeb, 0x6b, 0x53, 0x0d, 0x88, 0x34, 0xa9, 0x2b, 0x93, 0x0b, 0xe7, 0x00, 0x86, 0x2e, - 0x18, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, 0x26, 0xba, 0x18, 0x6f, - 0xcb, 0xdd, 0x11, 0x82, 0x12, 0x5c, 0x69, 0x15, 0xca, 0x4f, 0x57, 0xa1, 0xc6, 0xfd, 0x93, 0xb3, - 0x4a, 0xe6, 0xe5, 0x59, 0x25, 0x73, 0x7a, 0x56, 0xc9, 0xbc, 0x08, 0x2b, 0xda, 0x49, 0x58, 0xd1, - 0x5e, 0x86, 0x15, 0xed, 0x34, 0xac, 0x68, 0x7f, 0x87, 0x15, 0xed, 0xc7, 0x7f, 0x2a, 0x99, 0xcf, - 0x67, 0x55, 0x84, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xba, 0xf8, 0x96, 0xa4, 0x0f, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 950e9334..174e6f5f 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -17,48 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus - Scale - ScaleSpec - ScaleStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -74,114 +50,664 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_2bb1f2101a7f10e2, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() } +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_2bb1f2101a7f10e2, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_2bb1f2101a7f10e2, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_2bb1f2101a7f10e2, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2bb1f2101a7f10e2, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, 
[]int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{16} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{17} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{18} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) +} + +var fileDescriptor_2bb1f2101a7f10e2 = []byte{ + // 1516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, + 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, + 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, + 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, + 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, + 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, + 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, + 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, + 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, + 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, + 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 
0xf9, 0x56, 0x6b, 0xd3, 0xb2, + 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, + 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, + 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, + 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, + 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, + 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, + 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, + 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, + 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, + 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, + 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, + 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, + 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, + 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, + 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, + 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, + 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, + 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, + 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, + 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, + 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, + 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, + 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, + 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, + 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, + 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, + 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, + 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, + 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, + 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, + 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, + 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, + 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, + 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, + 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, + 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 
0x34, + 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, + 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, + 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, + 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, + 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, + 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, + 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, + 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, + 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, + 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, + 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, + 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, + 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, + 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, + 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, + 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, + 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, + 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, + 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, + 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, + 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, + 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, + 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, + 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, + 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, + 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, + 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, + 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, + 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, + 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, + 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, + 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, + 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, + 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, + 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, + 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, + 0xde, 0x1d, 0x30, 
0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, + 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, + 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, + 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, + 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, + 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, + 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, + 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, + 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, + 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, + 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, + 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), 
"k8s.io.api.autoscaling.v1.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") -} func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -189,29 +715,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -219,51 +753,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -271,49 +817,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -321,41 +879,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -363,41 +932,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := 
m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -405,37 +985,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -443,38 +1032,45 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x20 } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -482,43 +1078,50 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + if m.CurrentCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x28 } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -526,61 +1129,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x22 } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -588,61 +1205,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil 
{ return nil, err } @@ -650,57 +1281,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -708,57 +1353,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -766,39 +1425,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -806,39 +1475,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -846,36 +1525,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -883,34 +1570,42 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } 
- i += n35 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -918,41 +1613,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -960,20 +1666,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -981,30 +1692,41 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x12 - i++ + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) - return i, nil + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base 
:= offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -1017,6 +1739,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1037,6 +1762,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1055,6 +1783,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1067,6 +1798,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1083,6 +1817,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1097,6 +1834,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1112,6 +1852,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1130,6 +1873,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1154,6 +1900,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1178,6 +1927,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1198,6 +1950,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1218,6 +1973,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1232,6 +1990,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1246,6 +2007,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1261,6 +2025,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1274,6 +2041,9 @@ func (m *ResourceMetricStatus) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1286,6 +2056,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1293,6 +2066,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n 
int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1302,14 +2078,7 @@ func (m *ScaleStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1332,9 +2101,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1345,9 +2114,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1357,7 +2126,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1371,7 +2140,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - 
`LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1382,9 +2151,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1408,7 +2182,7 @@ func (this *HorizontalPodAutoscalerStatus) String() string { } s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, @@ -1422,10 +2196,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1436,10 +2210,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - 
`Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1451,9 +2225,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1465,9 +2239,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1478,8 +2252,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", 
"k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1490,8 +2264,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1503,7 +2277,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1515,7 +2289,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1525,7 +2299,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1576,7 +2350,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1604,7 +2378,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1614,6 +2388,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1633,7 +2410,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1643,6 +2420,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1662,7 +2442,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1672,6 +2452,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1686,6 +2469,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1713,7 +2499,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1741,7 +2527,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1751,6 +2537,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1770,7 +2559,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1779,11 +2568,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1803,7 +2595,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1812,11 +2604,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1836,7 +2631,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1845,11 +2640,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1864,6 +2662,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1891,7 +2692,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1919,7 +2720,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1929,6 +2730,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1948,7 +2752,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1957,11 +2761,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1981,7 +2788,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1990,6 +2797,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2011,7 +2821,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2020,11 +2830,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2039,6 +2852,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2066,7 +2882,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2094,7 +2910,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2103,6 +2919,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2124,7 +2943,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2133,6 +2952,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2154,7 +2976,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2163,6 +2985,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2179,6 +3004,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2206,7 +3034,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2234,7 +3062,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2244,6 +3072,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2263,7 +3094,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2273,6 +3104,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2292,7 +3126,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2301,6 +3135,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2322,7 +3159,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2332,6 +3169,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2351,7 +3191,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2361,6 +3201,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2375,6 +3218,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2402,7 +3248,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2430,7 +3276,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2439,6 +3285,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2460,7 +3309,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2469,6 +3318,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2486,6 +3338,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2513,7 +3368,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2541,7 +3396,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2550,6 +3405,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2571,7 +3429,7 @@ func (m 
*HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2591,7 +3449,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2610,7 +3468,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2625,6 +3483,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2652,7 +3513,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2680,7 +3541,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2700,7 +3561,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2709,11 +3570,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2733,7 +3597,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2752,7 +3616,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2771,7 +3635,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2786,6 +3650,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2813,7 +3680,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2841,7 +3708,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2851,6 +3718,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2870,7 +3740,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2879,6 +3749,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2903,7 +3776,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2912,6 +3785,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2936,7 +3812,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2945,6 +3821,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2969,7 +3848,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2978,6 +3857,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2997,6 +3879,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3024,7 +3909,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3052,7 +3937,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3062,6 +3947,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3081,7 +3969,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3090,6 +3978,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3114,7 +4005,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3123,6 +4014,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -3147,7 +4041,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3156,6 +4050,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3180,7 +4077,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3189,6 +4086,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3208,6 +4108,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3235,7 +4138,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3263,7 +4166,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3272,6 +4175,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3293,7 +4199,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3303,6 +4209,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3322,7 +4231,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3331,6 +4240,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3352,7 +4264,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3361,11 +4273,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3385,7 +4300,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3394,11 +4309,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3413,6 +4331,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3440,7 +4361,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3468,7 +4389,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3477,6 +4398,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3498,7 +4422,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3508,6 +4432,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3527,7 +4454,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3536,6 +4463,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3557,7 +4487,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3566,11 +4496,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3590,7 +4523,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3599,11 +4532,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3618,6 +4554,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3645,7 +4584,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3673,7 +4612,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3683,6 +4622,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3702,7 +4644,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3711,6 +4653,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3732,7 +4677,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3741,11 +4686,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3760,6 +4708,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3787,7 +4738,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3815,7 +4766,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3825,6 +4776,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3844,7 +4798,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3853,6 +4807,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -3874,7 +4831,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3883,11 +4840,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3902,6 +4862,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3929,7 +4892,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3957,7 +4920,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3967,6 +4930,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3986,7 +4952,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4006,7 +4972,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4015,11 +4981,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4034,6 +5003,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4061,7 +5033,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4089,7 +5061,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4099,6 +5071,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4118,7 +5093,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4138,7 +5113,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4147,6 +5122,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4163,6 +5141,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4190,7 +5171,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4218,7 +5199,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4227,6 +5208,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4248,7 +5232,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4257,6 +5241,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4278,7 +5265,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4287,6 +5274,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4303,6 +5293,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4330,7 +5323,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4358,7 +5351,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4372,6 +5365,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4399,7 +5395,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4427,7 +5423,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << 
shift if b < 0x80 { break } @@ -4446,7 +5442,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4456,6 +5452,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4470,6 +5469,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4536,10 +5538,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4568,6 +5573,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4586,106 +5594,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 
0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 
0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 
0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 5b56b2ac..f50ed9d1 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -88,11 +88,11 @@ message ExternalMetricStatus { // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -141,7 +141,11 @@ message HorizontalPodAutoscalerSpec { // and will set the desired number of pods by using its Scale subresource. optional CrossVersionObjectReference scaleTargetRef = 1; - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; @@ -380,15 +384,15 @@ message ResourceMetricStatus { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index c03af13a..519bd786 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,7 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption // and will set the desired number of pods by using its Scale subresource. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. @@ -78,11 +82,11 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -109,15 +113,15 @@ type HorizontalPodAutoscalerList struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 72ac9727..129ce2b4 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available.", "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } @@ -219,9 +219,9 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 3fda47d5..ddb60112 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index b6a5f356..0b6ed381 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -17,45 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto -/* - Package v2beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -71,76 +50,450 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_26c1bfc7a52d0478, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_26c1bfc7a52d0478, []int{4} } +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func 
(*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_26c1bfc7a52d0478, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_26c1bfc7a52d0478, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() } +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_26c1bfc7a52d0478, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) 
Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") @@ -160,10 +513,112 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) +} + +var fileDescriptor_26c1bfc7a52d0478 = []byte{ + // 1475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, + 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, + 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, + 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, + 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, + 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, + 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, + 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, + 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, + 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, + 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, + 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, + 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, + 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, + 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, + 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, + 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, + 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, + 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, + 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 
0xb8, 0x82, 0x1d, + 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, + 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, + 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, + 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, + 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, + 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, + 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, + 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, + 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, + 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, + 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, + 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, + 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, + 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, + 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, + 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, + 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, + 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, + 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, + 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, + 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, + 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, + 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, + 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, + 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, + 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, + 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, + 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, + 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, + 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, + 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, + 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, + 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, + 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, + 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, + 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, + 0x8e, 
0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, + 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, + 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, + 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, + 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, + 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, + 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, + 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, + 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, + 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, + 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, + 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, + 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, + 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, + 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, + 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, + 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, + 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, + 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, + 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, + 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, + 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, + 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, + 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, + 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, + 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, + 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, + 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, + 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, + 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, + 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, + 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, + 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, + 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, + 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, + 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, + 0x18, 0x00, 0x00, +} + func (m 
*CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -171,29 +626,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -201,51 +664,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -253,49 +728,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -303,41 +790,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -345,41 +843,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -387,37 +896,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -425,45 +943,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -471,62 +998,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n13 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range 
m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -534,61 +1072,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -596,61 +1148,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -658,57 +1224,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, 
err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -716,57 +1296,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -774,39 +1368,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -814,39 +1418,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return 
nil, err } @@ -854,36 +1468,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a } - return i, nil + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -891,40 +1513,53 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } - i += n35 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -937,6 +1572,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -957,6 +1595,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) 
Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -975,6 +1616,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -987,6 +1631,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1003,6 +1650,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1017,6 +1667,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1035,6 +1688,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1062,6 +1718,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1086,6 +1745,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1110,6 +1772,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1130,6 +1795,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1150,6 +1818,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1164,6 +1835,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1178,6 +1852,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1193,6 +1870,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1206,14 +1886,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1236,9 +1909,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + 
strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1249,9 +1922,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1261,7 +1934,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1275,7 +1948,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1286,9 +1959,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1297,11 +1975,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1310,13 +1993,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1327,10 +2020,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + 
strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1341,10 +2034,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1356,9 +2049,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1370,9 +2063,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1383,8 +2076,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1395,8 +2088,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1408,7 +2101,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1420,7 +2113,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1448,7 +2141,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1476,7 +2169,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1486,6 +2179,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1505,7 +2201,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1515,6 +2211,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1534,7 +2233,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1544,6 +2243,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1555,7 +2257,10 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1585,7 +2290,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1613,7 +2318,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1623,6 +2328,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1642,7 +2350,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1651,11 +2359,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1675,7 +2386,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1684,11 +2395,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err 
:= m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1708,7 +2422,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1717,11 +2431,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1736,6 +2453,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1763,7 +2483,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1791,7 +2511,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1801,6 +2521,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1820,7 +2543,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1829,11 +2552,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1853,7 +2579,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1862,6 +2588,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1883,7 +2612,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1892,11 +2621,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { return err @@ -1911,6 +2643,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1938,7 +2673,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1966,7 +2701,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1975,6 +2710,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1996,7 +2734,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2005,6 +2743,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2026,7 +2767,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2035,6 +2776,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2051,6 +2795,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2078,7 +2825,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2106,7 +2853,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2116,6 +2863,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2135,7 +2885,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2145,6 +2895,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2164,7 +2917,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 
0x80 { break } @@ -2173,6 +2926,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2194,7 +2950,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2204,6 +2960,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2982,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2233,6 +2992,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2247,6 +3009,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2274,7 +3039,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2302,7 +3067,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2311,6 +3076,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2332,7 +3100,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2341,6 +3109,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2358,6 +3129,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2385,7 +3159,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2413,7 +3187,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2422,6 +3196,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2443,7 +3220,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2463,7 +3240,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2482,7 +3259,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2491,6 +3268,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2508,6 +3288,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2535,7 +3318,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2563,7 +3346,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2583,7 +3366,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2592,11 +3375,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2616,7 +3402,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2635,7 +3421,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2654,7 +3440,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2663,6 +3449,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2685,7 +3474,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2694,6 
+3483,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2711,6 +3503,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2738,7 +3533,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2766,7 +3561,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2776,6 +3571,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2795,7 +3593,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2804,6 +3602,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2828,7 +3629,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2837,6 +3638,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2861,7 +3665,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2870,6 +3674,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2894,7 +3701,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2903,6 +3710,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2922,6 +3732,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2949,7 +3762,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2977,7 +3790,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if 
b < 0x80 { break } @@ -2987,6 +3800,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3006,7 +3822,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3015,6 +3831,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3039,7 +3858,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3048,6 +3867,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3072,7 +3894,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3081,6 +3903,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3105,7 +3930,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3114,6 +3939,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3133,6 +3961,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3160,7 +3991,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3188,7 +4019,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3197,6 +4028,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3218,7 +4052,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3228,6 +4062,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3247,7 +4084,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) 
<< shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3256,6 +4093,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3277,7 +4117,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3286,11 +4126,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3310,7 +4153,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3319,11 +4162,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3338,6 +4184,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3365,7 +4214,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3393,7 +4242,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3402,6 +4251,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3423,7 +4275,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3433,6 +4285,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3452,7 +4307,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3461,6 +4316,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3482,7 +4340,7 @@ func (m *ObjectMetricStatus) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3491,11 +4349,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3515,7 +4376,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3524,11 +4385,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3543,6 +4407,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3570,7 +4437,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3598,7 +4465,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3608,6 +4475,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3627,7 +4497,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3636,6 +4506,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3657,7 +4530,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3666,11 +4539,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3685,6 +4561,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -3712,7 +4591,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3740,7 +4619,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3750,6 +4629,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3769,7 +4651,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3778,6 +4660,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3799,7 +4684,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3808,11 +4693,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3827,6 +4715,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3854,7 +4745,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3882,7 +4773,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3892,6 +4783,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3911,7 +4805,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3931,7 +4825,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3940,11 +4834,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err 
:= m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3959,6 +4856,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3986,7 +4886,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4014,7 +4914,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4024,6 +4924,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4043,7 +4946,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4063,7 +4966,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4072,6 +4975,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4088,6 +4994,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4154,10 +5063,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4186,6 +5098,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4204,104 +5119,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 
0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 
0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 
0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 04bc0ed6..f90f93f9 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -92,12 +92,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -146,8 +146,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. 
// +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 6a30e677..92fe5a2b 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v2beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,8 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -377,12 +380,12 @@ type ExternalMetricStatus struct { type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 589408ac..a0d5f533 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
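The regenerated minReplicas documentation above states a concrete rule: the lower bound may be 0 only when the alpha HPAScaleToZero feature gate is enabled and the spec configures at least one Object or External metric. The Go sketch below merely illustrates that rule against the vendored v2beta1 types; allowZeroMinReplicas is a hypothetical helper for illustration, not the apiserver's actual validation code.

package main

import (
	"fmt"

	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
)

// allowZeroMinReplicas is a hypothetical helper that restates the rule from the
// regenerated minReplicas comment: a lower bound of 0 is only meaningful when the
// alpha HPAScaleToZero feature gate is enabled and the spec carries at least one
// Object or External metric. Illustrative only.
func allowZeroMinReplicas(spec autoscalingv2beta1.HorizontalPodAutoscalerSpec, scaleToZeroGateEnabled bool) bool {
	if !scaleToZeroGateEnabled {
		return false
	}
	for _, m := range spec.Metrics {
		if m.Type == autoscalingv2beta1.ObjectMetricSourceType ||
			m.Type == autoscalingv2beta1.ExternalMetricSourceType {
			return true
		}
	}
	return false
}

func main() {
	spec := autoscalingv2beta1.HorizontalPodAutoscalerSpec{
		MaxReplicas: 10,
		Metrics: []autoscalingv2beta1.MetricSpec{
			{Type: autoscalingv2beta1.ExternalMetricSourceType},
		},
	}
	fmt.Println(allowZeroMinReplicas(spec, true))  // true: gate on, External metric present
	fmt.Println(allowZeroMinReplicas(spec, false)) // false: gate off
}

With only Pods or Resource metrics configured, the helper returns false, mirroring the comment's requirement that scale-to-zero needs an Object or External metric to keep driving the autoscaler.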
var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index 2ec7e615..c51e05b8 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index 816fea9d..23bc5b98 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -17,48 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto -/* - Package v2beta2 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricIdentifier - MetricSpec - MetricStatus - MetricTarget - MetricValueStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta2 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
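Beyond the restructured imports, the regenerated gogo/protobuf files change every hand-rolled varint decode from (uint64(b) & 0x7F) << shift to uint64(b&0x7F) << shift and insert postIndex < 0 and iNdEx < 0 guards, so a corrupted or hostile length field can no longer overflow into a negative slice index. The standalone function below is a simplified sketch of that decode-and-guard pattern for a single length-delimited field; it is not the generated code itself, and the helper name is made up for illustration.

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
)

// readLengthPrefixed mimics the pattern used by the regenerated Unmarshal
// functions: decode a varint length, then guard against negative values and
// integer overflow before using the result as a slice bound.
func readLengthPrefixed(dAtA []byte) (field []byte, next int, err error) {
	var msglen int
	iNdEx := 0
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errIntOverflow
		}
		if iNdEx >= len(dAtA) {
			return nil, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // overflow guard added by the newer codegen
		return nil, 0, errInvalidLength
	}
	if postIndex > len(dAtA) {
		return nil, 0, errors.New("unexpected EOF")
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	payload := append([]byte{0x03}, []byte("abc")...)
	field, next, err := readLengthPrefixed(payload)
	fmt.Println(string(field), next, err) // abc 4 <nil>
}

Checking postIndex after the addition, rather than trusting msglen >= 0 alone, is what closes the window where iNdEx + msglen wraps around to a negative value.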
var _ = proto.Marshal @@ -74,88 +50,534 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_592ad94d7d6be24f, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_592ad94d7d6be24f, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) } -func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } -func (*MetricIdentifier) ProtoMessage() {} -func (*MetricIdentifier) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{8} +} +func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricIdentifier.Merge(m, src) +} +func (m *MetricIdentifier) XXX_Size() int { + return m.Size() +} +func (m *MetricIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) +} -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo -func (m *MetricTarget) Reset() { *m = MetricTarget{} } -func (*MetricTarget) ProtoMessage() {} -func (*MetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{9} +} +func (m 
*MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} -func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } -func (*MetricValueStatus) ProtoMessage() {} -func (*MetricValueStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{10} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{11} +} +func (m *MetricTarget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricTarget) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTarget.Merge(m, src) +} +func (m *MetricTarget) XXX_Size() int { + return m.Size() +} +func (m *MetricTarget) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTarget.DiscardUnknown(m) +} -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_MetricTarget proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *MetricValueStatus) Reset() { *m = 
MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{12} +} +func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricValueStatus.Merge(m, src) +} +func (m *MetricValueStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{13} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{14} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{15} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) 
XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{16} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{17} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{18} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") @@ -178,10 +600,109 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) +} + +var fileDescriptor_592ad94d7d6be24f = []byte{ + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 
0xc5, + 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, + 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, + 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, + 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, + 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, + 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, + 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, + 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, + 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, + 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, + 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, + 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, + 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, + 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, + 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, + 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, + 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, + 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, + 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, + 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, + 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, + 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, + 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, + 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, + 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, + 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, + 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, + 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, + 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, + 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, + 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, + 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, + 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, + 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, + 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, + 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, + 0xc0, 0x1b, 0xb1, 
0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, + 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, + 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, + 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, + 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, + 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, + 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, + 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, + 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, + 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, + 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, + 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, + 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, + 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, + 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, + 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, + 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, + 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, + 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, + 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, + 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, + 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, + 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, + 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, + 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, + 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, + 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, + 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, + 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, + 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, + 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, + 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, + 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, + 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, + 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, + 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, + 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 
0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, + 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, + 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, + 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, + 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, + 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, + 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, + 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, + 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, + 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, + 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, + 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, + 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, + 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, + 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, + 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, + 0x00, +} + func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -189,29 +710,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -219,33 +748,42 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n1, err := 
m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n2, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -253,33 +791,42 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n3, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n4, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -287,41 +834,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -329,41 +887,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -371,37 +940,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] 
= 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -409,45 +987,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n10, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i += n10 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -455,62 +1042,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n11, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n11 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := 
len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -518,31 +1116,39 @@ func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { } func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n12, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -550,61 +1156,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n13, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n14, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + 
size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n15, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n16, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -612,61 +1232,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n17, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n18, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n19, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n20, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } func (m *MetricTarget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -674,46 +1308,56 @@ func (m *MetricTarget) Marshal() (dAtA []byte, err error) { } func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n21, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x20 } if m.AverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n22, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x1a } - if m.AverageUtilization != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -721,42 +1365,51 @@ func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Value != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n23, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x18 } if m.AverageValue != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n24, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- + dAtA[i] = 0x12 } - if m.AverageUtilization != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -764,41 +1417,52 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n25, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n27, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -806,41 +1470,52 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n28, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n29, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n30, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -848,33 +1523,42 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n31, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n31 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n32, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -882,33 +1566,42 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n33, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n34, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -916,29 +1609,37 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n35, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, 
i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -946,35 +1647,48 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n36, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -987,6 +1701,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -997,6 +1714,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1007,6 +1727,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1019,6 +1742,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1035,6 +1761,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1049,6 +1778,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1067,6 +1799,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1094,6 +1829,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricIdentifier) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1106,6 +1844,9 @@ func (m *MetricIdentifier) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = 
l l = len(m.Type) @@ -1130,6 +1871,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1154,6 +1898,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *MetricTarget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1173,6 +1920,9 @@ func (m *MetricTarget) Size() (n int) { } func (m *MetricValueStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Value != nil { @@ -1190,6 +1940,9 @@ func (m *MetricValueStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.DescribedObject.Size() @@ -1202,6 +1955,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1214,6 +1970,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1224,6 +1983,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1234,6 +1996,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1244,6 +2009,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1254,14 +2022,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1305,7 +2066,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1319,7 +2080,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1330,9 +2091,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := 
range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1341,11 +2107,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1354,13 +2125,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1371,7 +2152,7 @@ func (this *MetricIdentifier) String() string { } s := strings.Join([]string{`&MetricIdentifier{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - 
`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1382,10 +2163,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1396,10 +2177,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1410,8 +2191,8 @@ func (this *MetricTarget) String() string { } s := strings.Join([]string{`&MetricTarget{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1422,8 +2203,8 @@ func (this *MetricValueStatus) String() string { return "nil" } s := strings.Join([]string{`&MetricValueStatus{`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", 
"k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1520,7 +2301,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1548,7 +2329,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1558,6 +2339,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1577,7 +2361,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1587,6 +2371,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1606,7 +2393,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1616,6 +2403,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1627,7 +2417,10 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1657,7 +2450,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1685,7 +2478,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1694,6 +2487,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1715,7 +2511,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1724,6 +2520,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1740,6 +2539,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 
0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1767,7 +2569,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1795,7 +2597,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1804,6 +2606,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1825,7 +2630,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1834,6 +2639,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1850,6 +2658,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1877,7 +2688,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1905,7 +2716,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1914,6 +2725,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1935,7 +2749,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1944,6 +2758,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1965,7 +2782,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1974,6 +2791,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1990,6 +2810,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2017,7 +2840,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << 
shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2045,7 +2868,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2055,6 +2878,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2074,7 +2900,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2084,6 +2910,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2103,7 +2932,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2112,6 +2941,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2133,7 +2965,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2143,6 +2975,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2162,7 +2997,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2172,6 +3007,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2186,6 +3024,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2213,7 +3054,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2241,7 +3082,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2250,6 +3091,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2271,7 +3115,7 @@ func (m 
*HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2280,6 +3124,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2297,6 +3144,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2324,7 +3174,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2352,7 +3202,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2361,6 +3211,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2382,7 +3235,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2402,7 +3255,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2421,7 +3274,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2430,6 +3283,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2447,6 +3303,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2474,7 +3333,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2502,7 +3361,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2522,7 +3381,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2531,11 +3390,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + 
m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2555,7 +3417,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2574,7 +3436,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2593,7 +3455,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2602,6 +3464,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2624,7 +3489,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2633,6 +3498,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2650,6 +3518,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2677,7 +3548,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2705,7 +3576,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2715,6 +3586,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2734,7 +3608,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2743,11 +3617,14 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2762,6 +3639,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2789,7 +3669,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -2817,7 +3697,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2827,6 +3707,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2846,7 +3729,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2855,6 +3738,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2879,7 +3765,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2888,6 +3774,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2912,7 +3801,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2921,6 +3810,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2945,7 +3837,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2954,6 +3846,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2973,6 +3868,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3000,7 +3898,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3028,7 +3926,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3038,6 +3936,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3057,7 +3958,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3066,6 +3967,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l 
{ return io.ErrUnexpectedEOF } @@ -3090,7 +3994,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3099,6 +4003,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3123,7 +4030,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3132,6 +4039,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3156,7 +4066,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3165,6 +4075,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3184,6 +4097,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3211,7 +4127,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3239,7 +4155,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3249,6 +4165,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3268,7 +4187,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3277,11 +4196,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3301,7 +4223,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3310,11 +4232,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
return err @@ -3334,7 +4259,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3349,6 +4274,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3376,7 +4304,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3404,7 +4332,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3413,11 +4341,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3437,7 +4368,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3446,11 +4377,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3470,7 +4404,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3485,6 +4419,9 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3512,7 +4449,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3540,7 +4477,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3549,6 +4486,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3570,7 +4510,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3579,6 +4519,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3600,7 +4543,7 @@ 
func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3609,6 +4552,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3625,6 +4571,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3652,7 +4601,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3680,7 +4629,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3689,6 +4638,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3710,7 +4662,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3719,6 +4671,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3740,7 +4695,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3749,6 +4704,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3765,6 +4723,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3792,7 +4753,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3820,7 +4781,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3829,6 +4790,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3850,7 +4814,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3859,6 +4823,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3875,6 +4842,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3902,7 +4872,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3930,7 +4900,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3939,6 +4909,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3960,7 +4933,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3969,6 +4942,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3985,6 +4961,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4012,7 +4991,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4040,7 +5019,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4050,6 +5029,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4069,7 +5051,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4078,6 +5060,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4094,6 +5079,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4121,7 +5109,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4149,7 +5137,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4159,6 +5147,9 @@ func (m *ResourceMetricStatus) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4178,7 +5169,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4187,6 +5178,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4203,6 +5197,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4269,10 +5266,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4301,6 +5301,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4319,101 +5322,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, - 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, - 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, - 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, - 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, - 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, - 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, - 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, - 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, - 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, - 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, - 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, - 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, - 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, - 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, - 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, - 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, - 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 
0x19, 0xe5, 0x76, - 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, - 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, - 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, - 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, - 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, - 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, - 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, - 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, - 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, - 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, - 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, - 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, - 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, - 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, - 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, - 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, - 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, - 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, - 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, - 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, - 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, - 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, - 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, - 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, - 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, - 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, - 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, - 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, - 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, - 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, - 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, - 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, - 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, - 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, - 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, - 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, - 0x6d, 
0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, - 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, - 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, - 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, - 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, - 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, - 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, - 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, - 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, - 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, - 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, - 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, - 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, - 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, - 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, - 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, - 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, - 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, - 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, - 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, - 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, - 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, - 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, - 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, - 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, - 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, - 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, - 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, - 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, - 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, - 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, - 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, - 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, - 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, - 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto 
b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index b4e4c95a..80f1d345 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta2"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -69,12 +69,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -123,8 +123,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 2d337953..314d7095 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -19,7 +19,7 @@ limitations under the License. package v2beta2 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +33,12 @@ import ( type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
// +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -52,8 +52,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -73,7 +76,7 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 996dc184..bb85b9f0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -60,8 +60,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -95,7 +95,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index a6a95653..2dffa333 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -126,7 +126,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 3aa32b57..fb9d21e1 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -17,32 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto - - It has these top-level messages: - Job - JobCondition - JobList - JobSpec - JobStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -55,25 +45,145 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{0} +} +func (m *Job) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Job) XXX_Merge(src proto.Message) { + xxx_messageInfo_Job.Merge(m, src) +} +func (m *Job) XXX_Size() int { + return m.Size() +} +func (m *Job) XXX_DiscardUnknown() { + xxx_messageInfo_Job.DiscardUnknown(m) +} + +var xxx_messageInfo_Job proto.InternalMessageInfo -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{1} +} +func (m *JobCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobCondition.Merge(m, src) +} +func (m *JobCondition) XXX_Size() int { + return m.Size() +} +func (m *JobCondition) XXX_DiscardUnknown() { + xxx_messageInfo_JobCondition.DiscardUnknown(m) +} -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_JobCondition proto.InternalMessageInfo -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{2} +} +func (m *JobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobList.Merge(m, src) +} +func (m *JobList) XXX_Size() int { + return m.Size() +} +func (m *JobList) XXX_DiscardUnknown() { + xxx_messageInfo_JobList.DiscardUnknown(m) +} -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_JobList proto.InternalMessageInfo + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{3} +} +func (m *JobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobSpec.Merge(m, src) +} +func (m *JobSpec) XXX_Size() int { + return m.Size() +} +func (m *JobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobSpec proto.InternalMessageInfo + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{4} +} +func (m *JobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobStatus.Merge(m, src) +} +func (m *JobStatus) XXX_Size() int { + return m.Size() +} +func (m *JobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_JobStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_JobStatus proto.InternalMessageInfo func init() { proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") @@ -82,10 +192,78 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) +} + +var fileDescriptor_3b52da57c93de713 = []byte{ + // 929 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, + 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, + 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, + 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, + 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xcf, + 0x3d, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, + 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, + 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, + 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 
0x1d, 0xd3, 0x65, + 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, + 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, + 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, + 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, + 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, + 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x0b, 0x95, 0x96, 0x66, 0x3c, 0x19, 0x49, + 0x80, 0x9b, 0x92, 0x6d, 0xce, 0x8e, 0xcd, 0x67, 0xce, 0xcf, 0xe0, 0x8a, 0x4b, 0x10, 0xd4, 0xc6, + 0x37, 0xa9, 0xb1, 0x96, 0xa5, 0x06, 0xaa, 0x30, 0x52, 0xba, 0xe2, 0xaf, 0xd0, 0x06, 0x8f, 0xc1, + 0xed, 0xac, 0x2b, 0xf7, 0x77, 0xcc, 0x25, 0xef, 0x6f, 0x5e, 0x30, 0xa7, 0x1f, 0x83, 0x6b, 0xef, + 0x14, 0x4e, 0x1b, 0xf2, 0x44, 0x94, 0x0e, 0x9f, 0xa3, 0x4d, 0x2e, 0xa8, 0x98, 0xf2, 0x4e, 0x43, + 0x39, 0xe8, 0x2b, 0x1d, 0x14, 0xcb, 0xde, 0x2b, 0x3c, 0x36, 0xf3, 0x33, 0x29, 0xd4, 0xdd, 0x3f, + 0x1b, 0x68, 0xe7, 0x82, 0x39, 0x67, 0x2c, 0xf2, 0x7c, 0xe1, 0xb3, 0x08, 0x9f, 0xa2, 0x0d, 0x71, + 0x1d, 0x83, 0xfa, 0xec, 0xb6, 0xfd, 0x64, 0x5e, 0x7a, 0x70, 0x1d, 0xc3, 0xab, 0xd4, 0xd8, 0xaf, + 0x73, 0x25, 0x46, 0x14, 0x1b, 0xf7, 0xca, 0x76, 0xd6, 0x95, 0xee, 0x74, 0xb1, 0xdc, 0xab, 0xd4, + 0x58, 0x92, 0x0e, 0xb3, 0x74, 0x5a, 0x6c, 0x0a, 0x8f, 0xd0, 0x6e, 0x40, 0xb9, 0xb8, 0x4a, 0x98, + 0x03, 0x03, 0x3f, 0x84, 0xe2, 0x1b, 0x3f, 0x7c, 0xd8, 0x0c, 0xa4, 0xc2, 0x3e, 0x28, 0x1a, 0xd8, + 0xed, 0xd5, 0x8d, 0xc8, 0xa2, 0x2f, 0x9e, 0x21, 0x2c, 0x81, 0x41, 0x42, 0x23, 0x9e, 0x7f, 0x92, + 0xac, 0xb6, 0xf1, 0xda, 0xd5, 0x0e, 0x8b, 0x6a, 0xb8, 0x77, 0xcf, 0x8d, 0x2c, 0xa9, 0x80, 0xdf, + 0x47, 0x9b, 0x09, 0x50, 0xce, 0xa2, 0x4e, 0x53, 0x3d, 0x57, 0x39, 0x1d, 0xa2, 0x50, 0x52, 0xdc, + 0xe2, 0x0f, 0xd0, 0x56, 0x08, 0x9c, 0xd3, 0x11, 0x74, 0x36, 0x15, 0xf1, 0x51, 0x41, 0xdc, 0xba, + 0xcc, 0x61, 0x32, 0xbf, 0xef, 0xfe, 0xae, 0xa1, 0xad, 0x0b, 0xe6, 0xf4, 0x7c, 0x2e, 0xf0, 0x0f, + 0xf7, 0xe2, 0x6b, 0x3e, 0xec, 0x63, 0xa4, 0x5a, 0x85, 0x77, 0xbf, 0xa8, 0xd3, 0x9a, 0x23, 0xb5, + 0xe8, 0x7e, 0x89, 0x9a, 0xbe, 0x80, 0x50, 0x8e, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, + 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, + 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, + 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x1f, 0x33, + 0x8f, 0x5b, 0x21, 0x39, 0xab, 0x60, 0x52, 0xe7, 0xe0, 0x67, 0xe8, 0x80, 0xba, 0xc2, 0x9f, 0xc1, + 0xd7, 0x40, 0xbd, 0xc0, 0x8f, 0xa0, 0x0f, 0x2e, 0x8b, 0xbc, 0xfc, 0xaf, 0xd3, 0xb0, 0xdf, 0xca, + 0x52, 0xe3, 0xe0, 0xe9, 0x32, 0x02, 0x59, 0xae, 0xc3, 0xa7, 0x68, 0xc7, 0xa1, 0xee, 0x84, 0x0d, + 0x87, 0x3d, 0x3f, 0xf4, 0x45, 0x67, 0x4b, 0x35, 0xb1, 0x9f, 0xa5, 0xc6, 0x8e, 0x5d, 0xc3, 0xc9, + 0x02, 0x0b, 0xff, 0x88, 0x5a, 0x1c, 0x02, 0x70, 0x05, 0x4b, 0x8a, 0x88, 0x7d, 0xf2, 0xc0, 0xa9, + 0x50, 0x07, 0x82, 0x7e, 0x21, 0xb5, 0x77, 0xe4, 0x58, 0xe6, 0x27, 0x52, 0x5a, 0xe2, 0xcf, 0xd1, + 0x5e, 0x48, 0xa3, 0x29, 0x2d, 0x99, 0x2a, 0x5b, 0x2d, 0x1b, 0x67, 0xa9, 0xb1, 0x77, 0xb9, 0x70, + 0x43, 0xee, 0x30, 0xf1, 0xb7, 0xa8, 0x25, 0x20, 0x8c, 0x03, 0x2a, 0xf2, 0xa0, 0x6d, 0x9f, 0xbc, + 0x57, 0x9f, 0xaa, 0xfc, 0xbf, 0xca, 0x46, 0xae, 0x98, 0x37, 0x28, 0x68, 0x6a, 0x31, 0x95, 0x29, + 0x99, 0xa3, 0xa4, 0xb4, 0xc1, 0xcf, 0xd1, 0x63, 0x21, 0x82, 0xe2, 0xc5, 0x9e, 0x0e, 0x05, 0x24, + 0xe7, 
0x7e, 0xe4, 0xf3, 0x31, 0x78, 0x9d, 0x96, 0x7a, 0xae, 0xb7, 0xb3, 0xd4, 0x78, 0x3c, 0x18, + 0xf4, 0x96, 0x51, 0xc8, 0x2a, 0x6d, 0xf7, 0xb7, 0x06, 0x6a, 0x97, 0x5b, 0x0d, 0x3f, 0x47, 0xc8, + 0x9d, 0xef, 0x10, 0xde, 0xd1, 0x54, 0x1e, 0xdf, 0x5d, 0x95, 0xc7, 0x72, 0xdb, 0x54, 0xab, 0xb9, + 0x84, 0x38, 0xa9, 0x19, 0xe1, 0xef, 0x50, 0x9b, 0x0b, 0x9a, 0x08, 0xb5, 0x0d, 0xd6, 0x5f, 0x7b, + 0x1b, 0xec, 0x66, 0xa9, 0xd1, 0xee, 0xcf, 0x0d, 0x48, 0xe5, 0x85, 0x87, 0x68, 0xaf, 0x0a, 0xe6, + 0xff, 0xdc, 0x6c, 0x6a, 0x9e, 0x67, 0x0b, 0x2e, 0xe4, 0x8e, 0xab, 0xdc, 0x2f, 0x79, 0x72, 0x55, + 0xd0, 0x9a, 0xd5, 0x7e, 0xc9, 0x63, 0x4e, 0x8a, 0x5b, 0x6c, 0xa1, 0x36, 0x9f, 0xba, 0x2e, 0x80, + 0x07, 0x9e, 0x8a, 0x4b, 0xd3, 0x7e, 0xa3, 0xa0, 0xb6, 0xfb, 0xf3, 0x0b, 0x52, 0x71, 0xa4, 0xf1, + 0x90, 0xfa, 0x01, 0x78, 0x2a, 0x26, 0x35, 0xe3, 0x73, 0x85, 0x92, 0xe2, 0xd6, 0x3e, 0xba, 0xb9, + 0xd5, 0xd7, 0x5e, 0xdc, 0xea, 0x6b, 0x2f, 0x6f, 0xf5, 0xb5, 0x5f, 0x32, 0x5d, 0xbb, 0xc9, 0x74, + 0xed, 0x45, 0xa6, 0x6b, 0x2f, 0x33, 0x5d, 0xfb, 0x2b, 0xd3, 0xb5, 0x5f, 0xff, 0xd6, 0xd7, 0xbe, + 0x5f, 0x9f, 0x1d, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x73, 0xe7, 0x7a, 0xb8, 0x08, 0x00, + 0x00, +} + func (m *Job) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -93,41 +271,52 @@ func (m *Job) Marshal() (dAtA []byte, err error) { } func (m *Job) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -135,49 +324,62 @@ func (m *JobCondition) Marshal() (dAtA []byte, err error) { } func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += 
copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,37 +387,46 @@ func (m *JobList) Marshal() (dAtA []byte, err error) { } func (m *JobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -223,70 +434,79 @@ func (m *JobSpec) Marshal() (dAtA []byte, err error) { } func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Parallelism != nil { - dAtA[i] = 
0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + if m.TTLSecondsAfterFinished != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + i-- + dAtA[i] = 0x40 } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + if m.BackoffLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + i-- + dAtA[i] = 0x38 } - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 if m.ManualSelector != nil { - dAtA[i] = 0x28 - i++ + i-- if *m.ManualSelector { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x28 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n8 - if m.BackoffLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x18 } - if m.TTLSecondsAfterFinished != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + if m.Completions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + i-- + dAtA[i] = 0x10 + } + if m.Parallelism != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *JobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -294,64 +514,80 @@ func (m *JobStatus) Marshal() (dAtA []byte, err error) { } func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) + i-- + dAtA[i] = 0x20 + if m.CompletionTime != nil { + { + size, err := m.CompletionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } if m.StartTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0x12 } - if m.CompletionTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n10 } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Job) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -364,6 +600,9 @@ func (m *Job) Size() (n int) { } func (m *JobCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -382,6 +621,9 @@ func (m *JobCondition) Size() (n int) { } func (m *JobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -396,6 +638,9 @@ func (m *JobList) Size() (n int) { } func (m *JobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Parallelism != nil { @@ -426,6 +671,9 @@ func (m *JobSpec) Size() (n int) { } func (m *JobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -449,14 +697,7 @@ func (m *JobStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -466,7 +707,7 @@ func (this *Job) String() string { return "nil" } s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -480,8 +721,8 @@ func (this *JobCondition) String() string { s := strings.Join([]string{`&JobCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", 
"v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -492,9 +733,14 @@ func (this *JobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Job{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Job", "Job", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -507,9 +753,9 @@ func (this *JobSpec) String() string { `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, `Completions:` + valueToStringGenerated(this.Completions) + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, `}`, @@ -520,10 +766,15 @@ func (this *JobStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]JobCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "JobCondition", "JobCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "v1.Time", 1) + `,`, `Active:` + fmt.Sprintf("%v", this.Active) + `,`, `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, @@ -554,7 +805,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b 
< 0x80 { break } @@ -582,7 +833,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -591,6 +842,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -612,7 +866,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -621,6 +875,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -642,7 +899,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -651,6 +908,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -667,6 +927,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -694,7 +957,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -722,7 +985,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -732,6 +995,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -751,7 +1017,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -761,6 +1027,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -780,7 +1049,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -789,6 +1058,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -810,7 +1082,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -819,6 +1091,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -840,7 +1115,7 @@ func (m *JobCondition) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -850,6 +1125,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -869,7 +1147,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -879,6 +1157,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -893,6 +1174,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -920,7 +1204,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -948,7 +1232,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -957,6 +1241,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -978,7 +1265,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -987,6 +1274,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1004,6 +1294,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1031,7 +1324,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1059,7 +1352,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1079,7 +1372,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1099,7 +1392,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1119,7 +1412,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1128,11 +1421,14 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1152,7 +1448,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1173,7 +1469,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1182,6 +1478,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1203,7 +1502,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1223,7 +1522,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1238,6 +1537,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1265,7 +1567,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1293,7 +1595,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1302,6 +1604,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1324,7 +1629,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1333,11 +1638,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1357,7 +1665,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1366,11 +1674,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.CompletionTime = &v1.Time{} } if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1390,7 +1701,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift + m.Active |= 
int32(b&0x7F) << shift if b < 0x80 { break } @@ -1409,7 +1720,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift + m.Succeeded |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1428,7 +1739,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift + m.Failed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1442,6 +1753,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1508,10 +1822,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1540,6 +1857,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1558,70 +1878,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 929 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, - 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, - 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, - 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, - 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, - 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xdf, - 0x39, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, - 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, - 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, - 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, - 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, - 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, - 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, - 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, - 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, - 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x84, 0x4a, 0x4b, 0x33, 0x9e, 0x8c, 0x24, - 0xc0, 0x4d, 0xc9, 0x36, 0x67, 0xc7, 0xe6, 0x33, 0xe7, 0x67, 0x70, 0xc5, 0x25, 0x08, 0x6a, 0xe3, - 0x9b, 0xd4, 0x58, 0xcb, 0x52, 0x03, 0x55, 0x18, 0x29, 0x5d, 0xf1, 0x57, 0x68, 0x83, 0xc7, 0xe0, - 0x76, 0xd6, 0x95, 0xfb, 0x3b, 0xe6, 0x92, 0xf9, 0x9b, 0x17, 0xcc, 0xe9, 0xc7, 0xe0, 0xda, 0x3b, - 0x85, 0xd3, 0x86, 0x3c, 0x11, 0xa5, 0xc3, 0xe7, 0x68, 0x93, 0x0b, 0x2a, 0xa6, 0xbc, 0xd3, 0x50, - 0x0e, 0xfa, 0x4a, 0x07, 
0xc5, 0xb2, 0xf7, 0x0a, 0x8f, 0xcd, 0xfc, 0x4c, 0x0a, 0x75, 0xf7, 0xcf, - 0x06, 0xda, 0xb9, 0x60, 0xce, 0x19, 0x8b, 0x3c, 0x5f, 0xf8, 0x2c, 0xc2, 0xa7, 0x68, 0x43, 0x5c, - 0xc7, 0xa0, 0x3e, 0xbb, 0x6d, 0x3f, 0x99, 0x97, 0x1e, 0x5c, 0xc7, 0xf0, 0x2a, 0x35, 0xf6, 0xeb, - 0x5c, 0x89, 0x11, 0xc5, 0xc6, 0xbd, 0xb2, 0x9d, 0x75, 0xa5, 0x3b, 0x5d, 0x2c, 0xf7, 0x2a, 0x35, - 0x96, 0xa4, 0xc3, 0x2c, 0x9d, 0x16, 0x9b, 0xc2, 0x23, 0xb4, 0x1b, 0x50, 0x2e, 0xae, 0x12, 0xe6, - 0xc0, 0xc0, 0x0f, 0xa1, 0xf8, 0xc6, 0x0f, 0x1f, 0xf6, 0x06, 0x52, 0x61, 0x1f, 0x14, 0x0d, 0xec, - 0xf6, 0xea, 0x46, 0x64, 0xd1, 0x17, 0xcf, 0x10, 0x96, 0xc0, 0x20, 0xa1, 0x11, 0xcf, 0x3f, 0x49, - 0x56, 0xdb, 0x78, 0xed, 0x6a, 0x87, 0x45, 0x35, 0xdc, 0xbb, 0xe7, 0x46, 0x96, 0x54, 0xc0, 0xef, - 0xa3, 0xcd, 0x04, 0x28, 0x67, 0x51, 0xa7, 0xa9, 0xc6, 0x55, 0xbe, 0x0e, 0x51, 0x28, 0x29, 0x6e, - 0xf1, 0x07, 0x68, 0x2b, 0x04, 0xce, 0xe9, 0x08, 0x3a, 0x9b, 0x8a, 0xf8, 0xa8, 0x20, 0x6e, 0x5d, - 0xe6, 0x30, 0x99, 0xdf, 0x77, 0x7f, 0xd7, 0xd0, 0xd6, 0x05, 0x73, 0x7a, 0x3e, 0x17, 0xf8, 0x87, - 0x7b, 0xf1, 0x35, 0x1f, 0xf6, 0x31, 0x52, 0xad, 0xc2, 0xbb, 0x5f, 0xd4, 0x69, 0xcd, 0x91, 0x5a, - 0x74, 0xbf, 0x44, 0x4d, 0x5f, 0x40, 0x28, 0x9f, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, - 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, - 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, - 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x87, 0x99, - 0xc7, 0xad, 0x90, 0x9c, 0x55, 0x30, 0xa9, 0x73, 0xf0, 0x33, 0x74, 0x40, 0x5d, 0xe1, 0xcf, 0xe0, - 0x6b, 0xa0, 0x5e, 0xe0, 0x47, 0xd0, 0x07, 0x97, 0x45, 0x5e, 0xfe, 0xd7, 0x69, 0xd8, 0x6f, 0x65, - 0xa9, 0x71, 0xf0, 0x74, 0x19, 0x81, 0x2c, 0xd7, 0xe1, 0x1f, 0x51, 0x8b, 0x43, 0x00, 0xae, 0x60, - 0x49, 0x11, 0x96, 0x4f, 0x1e, 0x38, 0x5f, 0xea, 0x40, 0xd0, 0x2f, 0xa4, 0xf6, 0x8e, 0x1c, 0xf0, - 0xfc, 0x44, 0x4a, 0x4b, 0xfc, 0x39, 0xda, 0x0b, 0x69, 0x34, 0xa5, 0x25, 0x53, 0xa5, 0xa4, 0x65, - 0xe3, 0x2c, 0x35, 0xf6, 0x2e, 0x17, 0x6e, 0xc8, 0x1d, 0x26, 0xfe, 0x16, 0xb5, 0x04, 0x84, 0x71, - 0x40, 0x45, 0x1e, 0x99, 0xed, 0x93, 0xf7, 0xea, 0xef, 0x23, 0xff, 0x79, 0xb2, 0x91, 0x2b, 0xe6, - 0x0d, 0x0a, 0x9a, 0x5a, 0x31, 0xe5, 0x7b, 0xcf, 0x51, 0x52, 0xda, 0xe0, 0x53, 0xb4, 0xe3, 0x50, - 0x77, 0xc2, 0x86, 0xc3, 0x9e, 0x1f, 0xfa, 0xa2, 0xb3, 0xa5, 0x46, 0xbe, 0x9f, 0xa5, 0xc6, 0x8e, - 0x5d, 0xc3, 0xc9, 0x02, 0x0b, 0x3f, 0x47, 0x8f, 0x85, 0x08, 0x8a, 0x89, 0x3d, 0x1d, 0x0a, 0x48, - 0xce, 0xfd, 0xc8, 0xe7, 0x63, 0xf0, 0x3a, 0x2d, 0x65, 0xf0, 0x76, 0x96, 0x1a, 0x8f, 0x07, 0x83, - 0xde, 0x32, 0x0a, 0x59, 0xa5, 0xed, 0xfe, 0xd6, 0x40, 0xed, 0x72, 0xab, 0xe1, 0xe7, 0x08, 0xb9, - 0xf3, 0x1d, 0xc2, 0x3b, 0x9a, 0xca, 0xe3, 0xbb, 0xab, 0xf2, 0x58, 0x6e, 0x9b, 0x6a, 0x35, 0x97, - 0x10, 0x27, 0x35, 0x23, 0xfc, 0x1d, 0x6a, 0x73, 0x41, 0x13, 0xa1, 0xb6, 0xc1, 0xfa, 0x6b, 0x6f, - 0x83, 0xdd, 0x2c, 0x35, 0xda, 0xfd, 0xb9, 0x01, 0xa9, 0xbc, 0xf0, 0x10, 0xed, 0x55, 0xc1, 0xfc, - 0x9f, 0x9b, 0x4d, 0xa5, 0xe0, 0x6c, 0xc1, 0x85, 0xdc, 0x71, 0x95, 0xfb, 0x25, 0x4f, 0xae, 0x8a, - 0x67, 0xb3, 0xda, 0x2f, 0x79, 0xcc, 0x49, 0x71, 0x8b, 0x2d, 0xd4, 0xe6, 0x53, 0xd7, 0x05, 0xf0, - 0xc0, 0x53, 0x21, 0x6b, 0xda, 0x6f, 0x14, 0xd4, 0x76, 0x7f, 0x7e, 0x41, 0x2a, 0x8e, 0x34, 0x1e, - 0x52, 0x3f, 0x00, 0x4f, 0x85, 0xab, 0x66, 0x7c, 0xae, 0x50, 0x52, 0xdc, 0xda, 0x47, 0x37, 0xb7, - 0xfa, 0xda, 0x8b, 0x5b, 0x7d, 0xed, 0xe5, 0xad, 0xbe, 0xf6, 0x4b, 0xa6, 0x6b, 0x37, 0x99, 0xae, - 0xbd, 0xc8, 0x74, 0xed, 0x65, 0xa6, 0x6b, 0x7f, 
0x65, 0xba, 0xf6, 0xeb, 0xdf, 0xfa, 0xda, 0xf7, - 0xeb, 0xb3, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x13, 0xdb, 0x98, 0xf9, 0xb8, 0x08, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 039149da..75de45f1 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -32,17 +32,17 @@ option go_package = "v1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobSpec spec = 2; // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobStatus status = 3; } @@ -75,7 +75,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 8dad9043..646a3cd7 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -28,17 +28,17 @@ import ( type Job struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -49,7 +49,7 @@ type Job struct { type JobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index d8e2bdd7..0120e07d 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of Jobs.", } diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 88cb0167..beba55ac 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -75,7 +75,7 @@ func (in *JobCondition) DeepCopy() *JobCondition { func (in *JobList) DeepCopyInto(out *JobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Job, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 36342a3a..837a2f9c 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -17,33 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -56,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") @@ -88,10 +221,68 @@ func init() { 
proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) +} + +var fileDescriptor_e57b277b05179ae7 = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, + 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, + 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, + 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, + 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, + 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, + 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, + 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, + 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, + 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, + 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, + 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, + 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, + 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, + 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, + 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, + 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, + 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, + 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, + 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, + 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, + 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, + 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, + 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, + 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, + 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, + 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, + 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, + 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, + 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, + 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, + 0x54, 
0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, + 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, + 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, + 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, + 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, + 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, + 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, + 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, + 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, + 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, + 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, + 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, + 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, + 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, + 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, + 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, + 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -99,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -141,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -179,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- 
+ dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -238,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -278,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -312,39 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if 
err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -357,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -371,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -395,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -411,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -421,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -431,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -448,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -459,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + 
`,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -486,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -498,7 +768,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -509,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -538,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -566,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -575,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -596,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -605,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -626,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -635,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -651,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -678,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -706,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -715,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -736,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -745,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -762,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -789,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -817,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -827,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -846,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -866,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -876,6 +1170,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -895,7 +1192,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -916,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -925,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-946,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -966,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -981,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1008,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1036,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1045,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1067,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1076,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1095,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1122,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1150,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1159,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1180,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1189,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1205,6 +1523,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if 
skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1232,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1260,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1269,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1290,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1299,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1315,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1381,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1413,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1431,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 771 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, - 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, - 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, - 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, - 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, - 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, - 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, - 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, - 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, - 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, - 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 
0x36, 0xcd, 0xe7, - 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, - 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, - 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, - 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, - 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, - 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, - 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, - 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, - 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, - 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, - 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, - 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, - 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, - 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, - 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, - 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, - 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, - 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, - 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, - 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, - 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, - 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, - 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, - 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, - 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, - 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, - 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, - 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, - 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, - 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, - 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, - 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, - 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, - 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, - 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, - 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, - 0x07, 
0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 043b3551..995b4f3f 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v1beta1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -112,12 +112,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -125,12 +125,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index cb5c9bad..2978747a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index abbdfec0..ecc91444 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 1c8bc447..7c9dcb74 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 4d9ba5c0..8271c841 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -17,33 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto -/* - Package v2alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v2alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -56,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_5495a0550fe29c46, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") @@ -88,10 +221,68 @@ func init() { proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptor_5495a0550fe29c46) +} + +var fileDescriptor_5495a0550fe29c46 = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, + 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, + 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, + 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, + 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, + 0x4f, 
0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, + 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, + 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, + 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, + 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, + 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, + 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, + 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, + 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, + 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, + 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, + 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, + 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, + 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, + 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, + 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, + 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, + 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, + 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, + 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, + 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, + 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, + 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, + 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, + 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, + 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, + 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, + 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, + 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, + 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, + 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, + 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, + 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, + 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, + 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, + 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, + 0x64, 0x97, 0xae, 0x96, 0xfe, 
0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, + 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, + 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, + 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, + 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, + 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, + 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, + 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -99,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -141,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -179,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -238,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { 
return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -278,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -312,39 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == 
nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -357,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -371,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -395,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -411,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -421,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -431,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -448,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -459,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -486,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -498,7 +768,7 @@ func 
(this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -509,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -538,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -566,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -575,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -596,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -605,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -626,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -635,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -651,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -678,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -706,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -715,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -736,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -745,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -762,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -789,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -817,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -827,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -846,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -866,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -876,6 +1170,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -895,7 +1192,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -916,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -925,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -946,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -966,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -981,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1008,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1036,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1045,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1067,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1076,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1095,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1122,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1150,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1159,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1180,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1189,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1205,6 +1523,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1232,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1260,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1269,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1290,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1299,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1315,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1381,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1413,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1431,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, - 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, - 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, - 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, - 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, - 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, - 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, - 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, - 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, - 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, - 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, - 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, - 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, - 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, - 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, - 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, - 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, - 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, - 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, - 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, - 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, - 
0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, - 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, - 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, - 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, - 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, - 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, - 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, - 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, - 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, - 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, - 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, - 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, - 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, - 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, - 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, - 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, - 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, - 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, - 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, - 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, - 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, - 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, - 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, - 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, - 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, - 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, - 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, - 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 4321c336..0bba13b8 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v2alpha1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -110,12 +110,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -123,12 +123,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index cccff94f..465e614a 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index f448a92c..bc80eca4 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v2alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index 20d87e7e..1b03f674 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 19bf225f..2e61b568 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -17,32 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto - - It has these top-level messages: - CertificateSigningRequest - CertificateSigningRequestCondition - CertificateSigningRequestList - CertificateSigningRequestSpec - CertificateSigningRequestStatus - ExtraValue -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -58,49 +47,244 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_09d156762b8218ef, []int{0} +} +func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequest.Merge(m, src) +} +func (m *CertificateSigningRequest) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m) } +var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo + func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_09d156762b8218ef, []int{1} +} +func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src) +} +func (m *CertificateSigningRequestCondition) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m) } +var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo + func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptor_09d156762b8218ef, []int{2} +} +func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestList.Merge(m, src) +} +func (m *CertificateSigningRequestList) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestList) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, 
[]int{3} + return fileDescriptor_09d156762b8218ef, []int{3} } +func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src) +} +func (m *CertificateSigningRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_09d156762b8218ef, []int{4} +} +func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src) +} +func (m *CertificateSigningRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m) } -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{5} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func init() { proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry") 
proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) +} + +var fileDescriptor_09d156762b8218ef = []byte{ + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4b, 0x8f, 0x1b, 0x45, + 0x10, 0xf6, 0xf8, 0xb5, 0x76, 0x7b, 0xd9, 0x44, 0x2d, 0x14, 0x0d, 0x2b, 0x65, 0x66, 0x35, 0x02, + 0xb4, 0x3c, 0xd2, 0xc3, 0x46, 0x08, 0x56, 0x7b, 0x40, 0x30, 0x4b, 0x04, 0x2b, 0x12, 0x21, 0x75, + 0x62, 0x0e, 0x08, 0x89, 0xb4, 0xc7, 0x95, 0x71, 0xc7, 0x99, 0x07, 0xd3, 0x3d, 0x06, 0xdf, 0xf2, + 0x13, 0x38, 0x72, 0x41, 0xe2, 0x97, 0x70, 0x5e, 0x0e, 0x48, 0x39, 0xe6, 0x80, 0x2c, 0xd6, 0xfc, + 0x8b, 0x9c, 0x50, 0xf7, 0xb4, 0x3d, 0xc6, 0x2b, 0xe3, 0x28, 0x7b, 0x9b, 0xfa, 0xaa, 0xbe, 0xaf, + 0x1e, 0x5d, 0x35, 0xe8, 0xcb, 0xf1, 0xb1, 0x20, 0x3c, 0xf5, 0xc7, 0xc5, 0x00, 0xf2, 0x04, 0x24, + 0x08, 0x7f, 0x02, 0xc9, 0x30, 0xcd, 0x7d, 0xe3, 0x60, 0x19, 0xf7, 0x43, 0xc8, 0x25, 0x7f, 0xc4, + 0x43, 0xa6, 0xdd, 0x47, 0x03, 0x90, 0xec, 0xc8, 0x8f, 0x20, 0x81, 0x9c, 0x49, 0x18, 0x92, 0x2c, + 0x4f, 0x65, 0x8a, 0xdd, 0x92, 0x40, 0x58, 0xc6, 0xc9, 0x2a, 0x81, 0x18, 0xc2, 0xfe, 0xad, 0x88, + 0xcb, 0x51, 0x31, 0x20, 0x61, 0x1a, 0xfb, 0x51, 0x1a, 0xa5, 0xbe, 0xe6, 0x0d, 0x8a, 0x47, 0xda, + 0xd2, 0x86, 0xfe, 0x2a, 0xf5, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, + 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x58, + 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc2, 0x47, 0xdb, 0x08, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x79, + 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, + 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, + 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x8a, 0x26, 0x93, 0x23, + 0xf2, 0xf5, 0xe0, 0x31, 0x84, 0xf2, 0x1e, 0x48, 0x16, 0xe0, 0xf3, 0x99, 0x5b, 0x9b, 0xcf, 0x5c, + 0x54, 0x61, 0x74, 0xa9, 0x8a, 0x1f, 0xa2, 0xa6, 0xc8, 0x20, 0xb4, 0xeb, 0x5a, 0xfd, 0x13, 0xb2, + 0x65, 0xfa, 0x64, 0x63, 0xad, 0xf7, 0x33, 0x08, 0x83, 0x5d, 0x93, 0xab, 0xa9, 0x2c, 0xaa, 0x95, + 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4e, + 0xb0, 0x67, 0xb2, 0xb4, 0x4b, 0x9b, 0x1a, 0x7d, 0xef, 0xd7, 0x3a, 0xf2, 0x36, 0x72, 0x4f, 0xd3, + 0x64, 0xc8, 0x25, 0x4f, 0x13, 0x7c, 0x8c, 0x9a, 0x72, 0x9a, 0x81, 0x1e, 0x68, 0x37, 0x78, 0x73, + 0x51, 0xf2, 0x83, 0x69, 0x06, 0x2f, 0x66, 0xee, 0xeb, 0xeb, 0xf1, 0x0a, 0xa7, 0x9a, 0x81, 0xdf, + 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x5e, 0xfc, + 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0x81, 0x3b, 0xf7, 0x4a, + 0x98, 0x2e, 0xfc, 0xf8, 0x31, 0xda, 0x7b, 0xc2, 0x84, 0xec, 0x67, 0x43, 0x26, 0xe1, 0x01, 0x8f, + 0xc1, 0x6e, 0xea, 0x29, 0xbd, 0xfb, 0x72, 0xef, 0xac, 0x18, 0xc1, 0x0d, 0xa3, 0xbe, 0x77, 0xf7, + 0x3f, 0x4a, 0x74, 0x4d, 0xd9, 0x9b, 0x59, 0xe8, 0xe6, 0xc6, 0xf9, 0xdc, 0xe5, 0x42, 0xe2, 0xef, + 0x2e, 0xed, 0x1b, 0x79, 0xb9, 0x3a, 0x14, 0x5b, 0x6f, 0xdb, 0x75, 0x53, 0x4b, 0x67, 0x81, 0xac, + 0xec, 0xda, 0xf7, 0xa8, 0xc5, 0x25, 0xc4, 0xc2, 0xae, 0x1f, 0x34, 0x0e, 0x7b, 0xb7, 0x4f, 0x5e, + 0x7d, 0x11, 0x82, 0xd7, 0x4c, 0x9a, 0xd6, 0x99, 0x12, 
0xa4, 0xa5, 0xae, 0xf7, 0x7b, 0xe3, 0x7f, + 0x1a, 0x54, 0x2b, 0x89, 0xdf, 0x42, 0x3b, 0x79, 0x69, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0x7a, 0x15, + 0x13, 0x41, 0x17, 0x3e, 0x4c, 0x50, 0xbb, 0x50, 0xcf, 0x23, 0xec, 0xd6, 0x41, 0xe3, 0xb0, 0x1b, + 0xdc, 0x50, 0x8f, 0xdc, 0xd7, 0xc8, 0x8b, 0x99, 0xdb, 0xf9, 0x0a, 0xa6, 0xda, 0xa0, 0x26, 0x0a, + 0xbf, 0x8f, 0x3a, 0x85, 0x80, 0x3c, 0x61, 0x31, 0x98, 0xd5, 0x58, 0xce, 0xa1, 0x6f, 0x70, 0xba, + 0x8c, 0xc0, 0x37, 0x51, 0xa3, 0xe0, 0x43, 0xb3, 0x1a, 0x3d, 0x13, 0xd8, 0xe8, 0x9f, 0x7d, 0x4e, + 0x15, 0x8e, 0x3d, 0xd4, 0x8e, 0xf2, 0xb4, 0xc8, 0x84, 0xdd, 0xd4, 0xc9, 0x91, 0x4a, 0xfe, 0x85, + 0x46, 0xa8, 0xf1, 0xe0, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, + 0xbb, 0x25, 0x77, 0x94, 0xd6, 0x9d, 0x44, 0xe6, 0xd3, 0x6a, 0xb2, 0x1a, 0xa3, 0x65, 0x9a, 0x7d, + 0x40, 0xa8, 0x8a, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x80, 0xa8, 0xfa, 0xc4, 0x9f, 0xa1, + 0xd6, 0x84, 0x3d, 0x29, 0xc0, 0xfc, 0x47, 0xde, 0xdb, 0x5a, 0x8f, 0x56, 0xfb, 0x46, 0x51, 0x68, + 0xc9, 0x3c, 0xa9, 0x1f, 0x5b, 0xde, 0x9f, 0x16, 0x72, 0xb7, 0x5c, 0x3f, 0xfe, 0x11, 0xa1, 0x70, + 0x71, 0x9b, 0xc2, 0xb6, 0x74, 0xff, 0xa7, 0xaf, 0xde, 0xff, 0xf2, 0xce, 0xab, 0x1f, 0xe5, 0x12, + 0x12, 0x74, 0x25, 0x15, 0x3e, 0x42, 0xbd, 0x15, 0x69, 0xdd, 0xe9, 0x6e, 0x70, 0x6d, 0x3e, 0x73, + 0x7b, 0x2b, 0xe2, 0x74, 0x35, 0xc6, 0xfb, 0xd8, 0x8c, 0x4d, 0x37, 0x8a, 0xdd, 0xc5, 0xfe, 0x5b, + 0xfa, 0x5d, 0xbb, 0xeb, 0xfb, 0x7b, 0xd2, 0xf9, 0xe5, 0x37, 0xb7, 0xf6, 0xf4, 0xaf, 0x83, 0x5a, + 0x70, 0xeb, 0xfc, 0xc2, 0xa9, 0x3d, 0xbb, 0x70, 0x6a, 0xcf, 0x2f, 0x9c, 0xda, 0xd3, 0xb9, 0x63, + 0x9d, 0xcf, 0x1d, 0xeb, 0xd9, 0xdc, 0xb1, 0x9e, 0xcf, 0x1d, 0xeb, 0xef, 0xb9, 0x63, 0xfd, 0xfc, + 0x8f, 0x53, 0xfb, 0x76, 0xc7, 0x74, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0x0e, 0xb6, + 0xcd, 0x7f, 0x07, 0x00, 0x00, +} + func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -108,41 +292,52 @@ func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } 
func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -150,37 +345,47 @@ func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) } func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -188,37 +393,46 @@ func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -226,92 +440,86 @@ func (m 
*CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) - i += copy(dAtA[i:], m.Request) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Usages) > 0 { - for _, s := range m.Usages { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x32 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n6, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Usages) > 0 { + for iNdEx := len(m.Usages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Usages[iNdEx]) + copy(dAtA[i:], m.Usages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Usages[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + if m.Request != nil { + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() 
dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -319,35 +527,43 @@ func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.Certificate != nil { + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - if m.Certificate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) - i += copy(dAtA[i:], m.Certificate) - } - return i, nil + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -355,38 +571,42 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CertificateSigningRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -399,6 +619,9 @@ func (m *CertificateSigningRequest) Size() (n int) { } func (m *CertificateSigningRequestCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -413,6 +636,9 @@ func (m *CertificateSigningRequestCondition) Size() (n int) { } func (m *CertificateSigningRequestList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -427,6 +653,9 @@ func (m *CertificateSigningRequestList) Size() (n int) { } func (m *CertificateSigningRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -462,6 +691,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { } func (m *CertificateSigningRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ 
-478,6 +710,9 @@ func (m *CertificateSigningRequestStatus) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -490,14 +725,7 @@ func (m ExtraValue) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -507,7 +735,7 @@ func (this *CertificateSigningRequest) String() string { return "nil" } s := strings.Join([]string{`&CertificateSigningRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -522,7 +750,7 @@ func (this *CertificateSigningRequestCondition) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -531,9 +759,14 @@ func (this *CertificateSigningRequestList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CertificateSigningRequest{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CertificateSigningRequestList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -567,8 +800,13 @@ func (this *CertificateSigningRequestStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]CertificateSigningRequestCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&CertificateSigningRequestStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `Certificate:` + 
valueToStringGenerated(this.Certificate) + `,`, `}`, }, "") @@ -597,7 +835,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -625,7 +863,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -634,6 +872,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -655,7 +896,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -664,6 +905,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -685,7 +929,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -694,6 +938,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -710,6 +957,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -737,7 +987,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -765,7 +1015,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -775,6 +1025,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -794,7 +1047,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -804,6 +1057,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -823,7 +1079,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -833,6 +1089,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -852,7 +1111,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -861,6 +1120,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -877,6 +1139,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -904,7 +1169,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -932,7 +1197,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -941,6 +1206,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -962,7 +1230,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -971,6 +1239,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -988,6 +1259,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1015,7 +1289,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1043,7 +1317,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1052,6 +1326,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1074,7 +1351,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1084,6 +1361,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1103,7 +1383,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1113,6 +1393,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1132,7 +1415,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1142,6 +1425,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1161,7 +1447,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1171,6 +1457,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1190,7 +1479,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1199,6 +1488,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1219,7 +1511,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1236,7 +1528,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1246,6 +1538,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1262,7 +1557,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1271,7 +1566,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -1308,6 +1603,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1335,7 +1633,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1363,7 +1661,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1372,6 +1670,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1394,7 +1695,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1403,6 +1704,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1420,6 +1724,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1447,7 +1754,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1475,7 +1782,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1485,6 +1792,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1499,6 +1809,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1565,10 +1878,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1597,6 +1913,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1615,62 +1934,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x8f, 0xdb, 0x44, - 0x18, 0x8e, 0xf3, 0xb5, 0xc9, 0x64, 0xd9, 0x56, 0x23, 0x54, 0x99, 0x95, 0x6a, 0xaf, 0x2c, 0x40, - 0xcb, 0x47, 0xc7, 0x6c, 0x85, 0x60, 0xb5, 0x07, 0x04, 0x5e, 0x2a, 0x58, 0xd1, 0x0a, 0x69, 0xda, - 0x70, 0x40, 0x48, 0x74, 0xe2, 0xbc, 0x75, 0xa6, 0xa9, 0x3f, 0xf0, 0x8c, 0x03, 0xb9, 0xf5, 0x27, - 0x70, 0xe4, 0x82, 0xc4, 0x2f, 0xe1, 0xbc, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x45, 0x6c, 0xf8, 0x17, - 
0x3d, 0xa1, 0x19, 0x4f, 0xe2, 0x90, 0x55, 0x48, 0xd5, 0xbd, 0x79, 0x9e, 0xf7, 0x79, 0x9e, 0xf7, - 0x63, 0xde, 0x31, 0xfa, 0x72, 0x7c, 0x2c, 0x08, 0x4f, 0xfd, 0x71, 0x31, 0x80, 0x3c, 0x01, 0x09, - 0xc2, 0x9f, 0x40, 0x32, 0x4c, 0x73, 0xdf, 0x04, 0x58, 0xc6, 0xfd, 0x10, 0x72, 0xc9, 0x1f, 0xf1, - 0x90, 0xe9, 0xf0, 0xd1, 0x00, 0x24, 0x3b, 0xf2, 0x23, 0x48, 0x20, 0x67, 0x12, 0x86, 0x24, 0xcb, - 0x53, 0x99, 0x62, 0xb7, 0x14, 0x10, 0x96, 0x71, 0xb2, 0x2a, 0x20, 0x46, 0xb0, 0x7f, 0x2b, 0xe2, - 0x72, 0x54, 0x0c, 0x48, 0x98, 0xc6, 0x7e, 0x94, 0x46, 0xa9, 0xaf, 0x75, 0x83, 0xe2, 0x91, 0x3e, - 0xe9, 0x83, 0xfe, 0x2a, 0xfd, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, - 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x54, - 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc1, 0x47, 0xdb, 0x04, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x75, - 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, - 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, - 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x62, 0x93, 0xc9, 0x11, - 0xf9, 0x7a, 0xf0, 0x18, 0x42, 0x79, 0x0f, 0x24, 0x0b, 0xf0, 0xf9, 0xcc, 0xad, 0xcd, 0x67, 0x2e, - 0xaa, 0x30, 0xba, 0x74, 0xc5, 0x0f, 0x51, 0x53, 0x64, 0x10, 0xda, 0x75, 0xed, 0xfe, 0x09, 0xd9, - 0x32, 0x7d, 0xb2, 0xb1, 0xd6, 0xfb, 0x19, 0x84, 0xc1, 0xae, 0xc9, 0xd5, 0x54, 0x27, 0xaa, 0x9d, - 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4f, - 0xb0, 0x67, 0xb2, 0xb4, 0xcb, 0x33, 0x35, 0xfe, 0xde, 0xaf, 0x75, 0xe4, 0x6d, 0xd4, 0x9e, 0xa6, - 0xc9, 0x90, 0x4b, 0x9e, 0x26, 0xf8, 0x18, 0x35, 0xe5, 0x34, 0x03, 0x3d, 0xd0, 0x6e, 0xf0, 0xe6, - 0xa2, 0xe4, 0x07, 0xd3, 0x0c, 0x5e, 0xcc, 0xdc, 0xd7, 0xd7, 0xf9, 0x0a, 0xa7, 0x5a, 0x81, 0xdf, - 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x51, 0xfc, - 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0xc4, 0x9d, 0x7b, 0x25, - 0x4c, 0x17, 0x71, 0xfc, 0x18, 0xed, 0x3d, 0x61, 0x42, 0xf6, 0xb3, 0x21, 0x93, 0xf0, 0x80, 0xc7, - 0x60, 0x37, 0xf5, 0x94, 0xde, 0x7d, 0xb9, 0x7b, 0x56, 0x8a, 0xe0, 0x86, 0x71, 0xdf, 0xbb, 0xfb, - 0x1f, 0x27, 0xba, 0xe6, 0xec, 0xcd, 0x2c, 0x74, 0x73, 0xe3, 0x7c, 0xee, 0x72, 0x21, 0xf1, 0x77, - 0x97, 0xf6, 0x8d, 0xbc, 0x5c, 0x1d, 0x4a, 0xad, 0xb7, 0xed, 0xba, 0xa9, 0xa5, 0xb3, 0x40, 0x56, - 0x76, 0xed, 0x7b, 0xd4, 0xe2, 0x12, 0x62, 0x61, 0xd7, 0x0f, 0x1a, 0x87, 0xbd, 0xdb, 0x27, 0xaf, - 0xbe, 0x08, 0xc1, 0x6b, 0x26, 0x4d, 0xeb, 0x4c, 0x19, 0xd2, 0xd2, 0xd7, 0xfb, 0xbd, 0xf1, 0x3f, - 0x0d, 0xaa, 0x95, 0xc4, 0x6f, 0xa1, 0x9d, 0xbc, 0x3c, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0xba, 0x15, - 0xc3, 0xa0, 0x8b, 0x18, 0x7e, 0x1f, 0x75, 0x0a, 0x01, 0x79, 0xc2, 0x62, 0x30, 0x57, 0xbd, 0xec, - 0xab, 0x6f, 0x70, 0xba, 0x64, 0xe0, 0x9b, 0xa8, 0x51, 0xf0, 0xa1, 0xb9, 0xea, 0x9e, 0x21, 0x36, - 0xfa, 0x67, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x0f, 0x1a, - 0x87, 0xdd, 0x00, 0xa9, 0x8d, 0xf9, 0x42, 0x23, 0xd4, 0x44, 0x30, 0x41, 0xed, 0x42, 0xed, 0x83, - 0xb0, 0x5b, 0x9a, 0x73, 0x43, 0x71, 0xfa, 0x1a, 0x79, 0x31, 0x73, 0x3b, 0x5f, 0xc1, 0x54, 0x1f, - 0xa8, 0x61, 0xe1, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, 0x77, - 0x4b, 0xee, 0x28, 0xaf, 0x3b, 0x89, 0xcc, 0xa7, 0xd5, 0x64, 0x35, 0x46, 0xcb, 0x34, 0xfb, 0x80, - 0x50, 0xc5, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x01, 0x51, 0xf5, 0x89, 0x3f, 0x43, 0xad, - 0x09, 0x7b, 0x52, 0x80, 
0xf9, 0x8f, 0xbc, 0xb7, 0xb5, 0x1e, 0xed, 0xf6, 0x8d, 0x92, 0xd0, 0x52, - 0x79, 0x52, 0x3f, 0xb6, 0xbc, 0x3f, 0x2d, 0xe4, 0x6e, 0x79, 0xfd, 0xf8, 0x47, 0x84, 0xc2, 0xc5, - 0xdb, 0x14, 0xb6, 0xa5, 0xfb, 0x3f, 0x7d, 0xf5, 0xfe, 0x97, 0xef, 0xbc, 0xfa, 0x51, 0x2e, 0x21, - 0x41, 0x57, 0x52, 0xe1, 0x23, 0xd4, 0x5b, 0xb1, 0xd6, 0x9d, 0xee, 0x06, 0xd7, 0xe6, 0x33, 0xb7, - 0xb7, 0x62, 0x4e, 0x57, 0x39, 0xde, 0xc7, 0x66, 0x6c, 0xba, 0x51, 0xec, 0x2e, 0xf6, 0xdf, 0xd2, - 0x77, 0xdc, 0x5d, 0xdf, 0xdf, 0x93, 0xce, 0x2f, 0xbf, 0xb9, 0xb5, 0xa7, 0x7f, 0x1d, 0xd4, 0x82, - 0x5b, 0xe7, 0x17, 0x4e, 0xed, 0xd9, 0x85, 0x53, 0x7b, 0x7e, 0xe1, 0xd4, 0x9e, 0xce, 0x1d, 0xeb, - 0x7c, 0xee, 0x58, 0xcf, 0xe6, 0x8e, 0xf5, 0x7c, 0xee, 0x58, 0x7f, 0xcf, 0x1d, 0xeb, 0xe7, 0x7f, - 0x9c, 0xda, 0xb7, 0x3b, 0xa6, 0xbb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x6b, 0x5b, 0xf9, - 0x7f, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 1b103f15..b3e0aeb5 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -73,7 +73,7 @@ func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequ func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CertificateSigningRequest, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index 349c6857..7e78be19 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) +} + +var fileDescriptor_929e1148ad9baca3 = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 
0x5d, 0x22, 0x21, + 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, + 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, + 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, + 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, + 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, + 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, + 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, + 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, + 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, + 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, + 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, + 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, + 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, + 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, + 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, + 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, + 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, + 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, + 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, + 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, + 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, + 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, + 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, + 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, + 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, + 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, + 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, + 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, + 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, + 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, + 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, + 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, +} + func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -80,33 +187,42 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,37 +230,46 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,59 +277,74 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := 
m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -215,6 +355,9 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -229,6 +372,9 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HolderIdentity != nil { @@ -253,14 +399,7 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -270,7 +409,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -280,9 +419,14 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -294,8 +438,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + 
valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -324,7 +468,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -352,7 +496,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -361,6 +505,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -382,7 +529,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -391,6 +538,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -407,6 +557,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -434,7 +587,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -462,7 +615,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -471,6 +624,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -492,7 +648,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -501,6 +657,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -518,6 +677,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -545,7 +707,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -573,7 +735,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -583,6 +745,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,7 +768,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -623,7 +788,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -632,11 +797,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.AcquireTime = &v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -656,7 +824,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -665,11 +833,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.RenewTime = &v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -689,7 +860,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -704,6 +875,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -770,10 +944,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -802,6 +979,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -820,45 +1000,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 535 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, - 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, - 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, - 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 
0x36, 0x21, 0x49, 0xd5, - 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, - 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, - 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, - 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, - 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, - 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, - 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, - 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, - 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, - 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, - 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, - 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, - 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, - 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, - 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, - 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, - 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, - 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, - 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, - 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, - 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, - 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, - 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, - 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, - 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, - 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, - 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, - 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, - 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 99692e95..4206746d 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1"; // Lease defines a lease concept. message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 8f9f24d0..7a5605ac 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index bd02ad5d..0f144043 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 0f534055..2dd7eddb 100644 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *Lease) DeepCopyObject() runtime.Object { func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Lease, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index aa57e9dd..2463d625 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) +} + +var fileDescriptor_daca6bcd2ff63a80 = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 
0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, + 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, + 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, + 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, + 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, + 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, + 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, + 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, + 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, + 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, + 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, + 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, + 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, + 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, + 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, + 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, + 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, + 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, + 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, + 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, + 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, + 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, + 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, + 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, + 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, + 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, + 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, + 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, + 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, + 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, + 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, + 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, +} + func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -80,33 +187,42 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,37 +230,46 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,59 +277,74 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -215,6 +355,9 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -229,6 +372,9 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HolderIdentity != nil { @@ -253,14 +399,7 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -270,7 +409,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -280,9 +419,14 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -294,8 +438,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + 
valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -324,7 +468,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -352,7 +496,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -361,6 +505,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -382,7 +529,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -391,6 +538,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -407,6 +557,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -434,7 +587,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -462,7 +615,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -471,6 +624,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -492,7 +648,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -501,6 +657,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -518,6 +677,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -545,7 +707,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -573,7 +735,7 @@ func (m *LeaseSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -583,6 +745,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,7 +768,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -623,7 +788,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -632,11 +797,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.AcquireTime = &v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -656,7 +824,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -665,11 +833,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.RenewTime = &v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -689,7 +860,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -704,6 +875,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -770,10 +944,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -802,6 +979,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -820,45 +1000,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, - 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, - 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 
0xae, 0xbc, - 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, - 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, - 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, - 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, - 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, - 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, - 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, - 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, - 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, - 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, - 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, - 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, - 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, - 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, - 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, - 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, - 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, - 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, - 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, - 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, - 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, - 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, - 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, - 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, - 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, - 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, - 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, - 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, - 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 918e0de1..cfc2711c 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1beta1"; // Lease defines a lease concept. 
message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 846f7280..da88f675 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 4532d322..f557d265 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index a628ac19..de696213 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *Lease) DeepCopyObject() runtime.Object { func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Lease, len(*in)) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 058e03eb..8f788035 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -17,229 +17,26 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto - - It has these top-level messages: - AWSElasticBlockStoreVolumeSource - Affinity - AttachedVolume - AvoidPods - AzureDiskVolumeSource - AzureFilePersistentVolumeSource - AzureFileVolumeSource - Binding - CSIPersistentVolumeSource - CSIVolumeSource - Capabilities - CephFSPersistentVolumeSource - CephFSVolumeSource - CinderPersistentVolumeSource - CinderVolumeSource - ClientIPConfig - ComponentCondition - ComponentStatus - ComponentStatusList - ConfigMap - ConfigMapEnvSource - ConfigMapKeySelector - ConfigMapList - ConfigMapNodeConfigSource - ConfigMapProjection - ConfigMapVolumeSource - Container - ContainerImage - ContainerPort - ContainerState - ContainerStateRunning - ContainerStateTerminated - ContainerStateWaiting - ContainerStatus - DaemonEndpoint - DownwardAPIProjection - DownwardAPIVolumeFile - DownwardAPIVolumeSource - EmptyDirVolumeSource - EndpointAddress - EndpointPort - EndpointSubset - Endpoints - EndpointsList - EnvFromSource - EnvVar - EnvVarSource - Event - EventList - EventSeries - EventSource - ExecAction - FCVolumeSource - FlexPersistentVolumeSource - FlexVolumeSource - FlockerVolumeSource - GCEPersistentDiskVolumeSource - GitRepoVolumeSource - GlusterfsPersistentVolumeSource - GlusterfsVolumeSource - HTTPGetAction - HTTPHeader - Handler - HostAlias - HostPathVolumeSource - ISCSIPersistentVolumeSource - ISCSIVolumeSource - KeyToPath - Lifecycle - LimitRange - LimitRangeItem - LimitRangeList - LimitRangeSpec - List - LoadBalancerIngress - LoadBalancerStatus - LocalObjectReference - LocalVolumeSource - NFSVolumeSource - Namespace - NamespaceList - NamespaceSpec - NamespaceStatus - Node - NodeAddress - NodeAffinity - NodeCondition - NodeConfigSource - NodeConfigStatus - 
NodeDaemonEndpoints - NodeList - NodeProxyOptions - NodeResources - NodeSelector - NodeSelectorRequirement - NodeSelectorTerm - NodeSpec - NodeStatus - NodeSystemInfo - ObjectFieldSelector - ObjectReference - PersistentVolume - PersistentVolumeClaim - PersistentVolumeClaimCondition - PersistentVolumeClaimList - PersistentVolumeClaimSpec - PersistentVolumeClaimStatus - PersistentVolumeClaimVolumeSource - PersistentVolumeList - PersistentVolumeSource - PersistentVolumeSpec - PersistentVolumeStatus - PhotonPersistentDiskVolumeSource - Pod - PodAffinity - PodAffinityTerm - PodAntiAffinity - PodAttachOptions - PodCondition - PodDNSConfig - PodDNSConfigOption - PodExecOptions - PodList - PodLogOptions - PodPortForwardOptions - PodProxyOptions - PodReadinessGate - PodSecurityContext - PodSignature - PodSpec - PodStatus - PodStatusResult - PodTemplate - PodTemplateList - PodTemplateSpec - PortworxVolumeSource - Preconditions - PreferAvoidPodsEntry - PreferredSchedulingTerm - Probe - ProjectedVolumeSource - QuobyteVolumeSource - RBDPersistentVolumeSource - RBDVolumeSource - RangeAllocation - ReplicationController - ReplicationControllerCondition - ReplicationControllerList - ReplicationControllerSpec - ReplicationControllerStatus - ResourceFieldSelector - ResourceQuota - ResourceQuotaList - ResourceQuotaSpec - ResourceQuotaStatus - ResourceRequirements - SELinuxOptions - ScaleIOPersistentVolumeSource - ScaleIOVolumeSource - ScopeSelector - ScopedResourceSelectorRequirement - Secret - SecretEnvSource - SecretKeySelector - SecretList - SecretProjection - SecretReference - SecretVolumeSource - SecurityContext - SerializedReference - Service - ServiceAccount - ServiceAccountList - ServiceAccountTokenProjection - ServiceList - ServicePort - ServiceProxyOptions - ServiceSpec - ServiceStatus - SessionAffinityConfig - StorageOSPersistentVolumeSource - StorageOSVolumeSource - Sysctl - TCPSocketAction - Taint - Toleration - TopologySelectorLabelRequirement - TopologySelectorTerm - TypedLocalObjectReference - Volume - VolumeDevice - VolumeMount - VolumeNodeAffinity - VolumeProjection - VolumeSource - VsphereVirtualDiskVolumeSource - WeightedPodAffinityTerm -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -255,17278 +52,29053 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_83c10c24ec417dc9, []int{0} } - -func (m *Affinity) Reset() { *m = Affinity{} } -func (*Affinity) ProtoMessage() {} -func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } -func (*AttachedVolume) ProtoMessage() {} -func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *AvoidPods) Reset() { *m = AvoidPods{} } -func (*AvoidPods) ProtoMessage() {} -func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } -func (*AzureDiskVolumeSource) ProtoMessage() {} -func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } -func (*AzureFilePersistentVolumeSource) ProtoMessage() {} -func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } -func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *Binding) Reset() { *m = Binding{} } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo -func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } -func (*CSIPersistentVolumeSource) ProtoMessage() {} -func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{1} +} +func (m *Affinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Affinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Affinity.Merge(m, src) +} +func (m *Affinity) XXX_Size() int { + return m.Size() +} +func (m 
*Affinity) XXX_DiscardUnknown() { + xxx_messageInfo_Affinity.DiscardUnknown(m) } -func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } -func (*CSIVolumeSource) ProtoMessage() {} -func (*CSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *Capabilities) Reset() { *m = Capabilities{} } -func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_Affinity proto.InternalMessageInfo -func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } -func (*CephFSPersistentVolumeSource) ProtoMessage() {} -func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{2} +} +func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AttachedVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedVolume.Merge(m, src) +} +func (m *AttachedVolume) XXX_Size() int { + return m.Size() +} +func (m *AttachedVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedVolume.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } -func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo -func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } -func (*CinderPersistentVolumeSource) ProtoMessage() {} -func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{3} +} +func (m *AvoidPods) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AvoidPods) XXX_Merge(src proto.Message) { + xxx_messageInfo_AvoidPods.Merge(m, src) +} +func (m *AvoidPods) XXX_Size() int { + return m.Size() +} +func (m *AvoidPods) XXX_DiscardUnknown() { + xxx_messageInfo_AvoidPods.DiscardUnknown(m) } -func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } -func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_AvoidPods proto.InternalMessageInfo -func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } -func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{4} +} +func (m 
*AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src) +} +func (m *AzureDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m) +} -func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } -func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo -func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } -func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } +func (*AzureFilePersistentVolumeSource) ProtoMessage() {} +func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{5} +} +func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src) +} +func (m *AzureFilePersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m) +} -func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } -func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo -func (m *ConfigMap) Reset() { *m = ConfigMap{} } -func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{6} +} +func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFileVolumeSource.Merge(m, src) +} +func (m *AzureFileVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFileVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m) +} -func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } -func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo -func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } -func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{7} +} +func (m *Binding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Binding) XXX_Merge(src proto.Message) { + xxx_messageInfo_Binding.Merge(m, src) +} +func (m *Binding) XXX_Size() int { + return m.Size() +} +func (m *Binding) XXX_DiscardUnknown() { + xxx_messageInfo_Binding.DiscardUnknown(m) +} -func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } -func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_Binding proto.InternalMessageInfo -func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } -func (*ConfigMapNodeConfigSource) ProtoMessage() {} -func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{23} +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{8} +} +func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src) +} +func (m *CSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m) } -func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } -func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo -func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } -func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } +func (*CSIVolumeSource) ProtoMessage() {} +func (*CSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{9} +} +func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIVolumeSource) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_CSIVolumeSource.Merge(m, src) +} +func (m *CSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m) +} -func (m *Container) Reset() { *m = Container{} } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo -func (m *ContainerImage) Reset() { *m = ContainerImage{} } -func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{10} +} +func (m *Capabilities) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Capabilities) XXX_Merge(src proto.Message) { + xxx_messageInfo_Capabilities.Merge(m, src) +} +func (m *Capabilities) XXX_Size() int { + return m.Size() +} +func (m *Capabilities) XXX_DiscardUnknown() { + xxx_messageInfo_Capabilities.DiscardUnknown(m) +} -func (m *ContainerPort) Reset() { *m = ContainerPort{} } -func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_Capabilities proto.InternalMessageInfo -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } +func (*CephFSPersistentVolumeSource) ProtoMessage() {} +func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{11} +} +func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src) +} +func (m *CephFSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m) +} -func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } -func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo -func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } -func (*ContainerStateTerminated) ProtoMessage() {} -func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{12} +} +func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSVolumeSource.Merge(m, src) +} +func (m *CephFSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } -func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } +func (*CinderPersistentVolumeSource) ProtoMessage() {} +func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{13} +} +func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src) +} +func (m *CinderPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m) +} -func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } -func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo -func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } -func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{14} +} +func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderVolumeSource.Merge(m, src) +} +func (m *CinderVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m) +} -func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } -func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo -func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } -func (*DownwardAPIVolumeSource) ProtoMessage() {} -func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{37} +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{15} +} +func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClientIPConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientIPConfig.Merge(m, src) +} +func (m *ClientIPConfig) XXX_Size() int { + return m.Size() +} +func (m *ClientIPConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClientIPConfig.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } -func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo -func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } -func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{16} +} +func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentCondition.Merge(m, src) +} +func (m *ComponentCondition) XXX_Size() int { + return m.Size() +} +func (m *ComponentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentCondition.DiscardUnknown(m) +} -func (m *EndpointPort) Reset() { *m = EndpointPort{} } -func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo -func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } -func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{17} +} +func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatus.Merge(m, src) +} +func (m 
*ComponentStatus) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatus.DiscardUnknown(m) +} -func (m *Endpoints) Reset() { *m = Endpoints{} } -func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo -func (m *EndpointsList) Reset() { *m = EndpointsList{} } -func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{18} +} +func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatusList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatusList.Merge(m, src) +} +func (m *ComponentStatusList) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatusList) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatusList.DiscardUnknown(m) +} -func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } -func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo -func (m *EnvVar) Reset() { *m = EnvVar{} } -func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{19} +} +func (m *ConfigMap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMap.Merge(m, src) +} +func (m *ConfigMap) XXX_Size() int { + return m.Size() +} +func (m *ConfigMap) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMap.DiscardUnknown(m) +} -func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } -func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +var xxx_messageInfo_ConfigMap proto.InternalMessageInfo -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{20} +} +func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapEnvSource) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapEnvSource.Merge(m, src) +} +func (m *ConfigMapEnvSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m) +} -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{21} +} +func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapKeySelector.Merge(m, src) +} +func (m *ConfigMapKeySelector) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m) +} -func (m *EventSource) Reset() { *m = EventSource{} } -func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo -func (m *ExecAction) Reset() { *m = ExecAction{} } -func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{22} +} +func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapList.Merge(m, src) +} +func (m *ConfigMapList) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapList) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapList.DiscardUnknown(m) +} -func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } -func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo -func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } -func (*FlexPersistentVolumeSource) ProtoMessage() {} -func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{53} +func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } +func (*ConfigMapNodeConfigSource) ProtoMessage() {} +func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{23} +} +func (m *ConfigMapNodeConfigSource) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src) +} +func (m *ConfigMapNodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m) } -func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } -func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } - -func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } -func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo -func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } -func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} -func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{24} +} +func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapProjection.Merge(m, src) +} +func (m *ConfigMapProjection) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } -func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo -func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } -func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} -func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{58} +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{25} +} +func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src) +} +func (m *ConfigMapVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapVolumeSource) XXX_DiscardUnknown() { + 
xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m) } -func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } -func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } - -func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } -func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{26} +} +func (m *Container) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Container) XXX_Merge(src proto.Message) { + xxx_messageInfo_Container.Merge(m, src) +} +func (m *Container) XXX_Size() int { + return m.Size() +} +func (m *Container) XXX_DiscardUnknown() { + xxx_messageInfo_Container.DiscardUnknown(m) +} -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +var xxx_messageInfo_Container proto.InternalMessageInfo -func (m *HostAlias) Reset() { *m = HostAlias{} } -func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{27} +} +func (m *ContainerImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerImage.Merge(m, src) +} +func (m *ContainerImage) XXX_Size() int { + return m.Size() +} +func (m *ContainerImage) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerImage.DiscardUnknown(m) +} -func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } -func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +var xxx_messageInfo_ContainerImage proto.InternalMessageInfo -func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } -func (*ISCSIPersistentVolumeSource) ProtoMessage() {} -func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{65} +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{28} +} +func (m *ContainerPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerPort.Merge(m, src) +} +func (m *ContainerPort) XXX_Size() int { + return m.Size() +} +func (m *ContainerPort) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerPort.DiscardUnknown(m) } -func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } -func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } - -func (m *KeyToPath) Reset() { *m = KeyToPath{} } -func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +var xxx_messageInfo_ContainerPort proto.InternalMessageInfo -func (m *Lifecycle) Reset() { *m = Lifecycle{} } -func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{29} +} +func (m *ContainerState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerState.Merge(m, src) +} +func (m *ContainerState) XXX_Size() int { + return m.Size() +} +func (m *ContainerState) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerState.DiscardUnknown(m) +} -func (m *LimitRange) Reset() { *m = LimitRange{} } -func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +var xxx_messageInfo_ContainerState proto.InternalMessageInfo -func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } -func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateRunning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateRunning.Merge(m, src) +} +func (m *ContainerStateRunning) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateRunning) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m) +} -func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } -func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo -func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } -func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return 
[vendored diff of regenerated protobuf code for the k8s.io/api core/v1 types: for each type from ContainerStateTerminated ([]int{31}) through NamespaceCondition ([]int{83}), the old Reset/ProtoMessage/Descriptor stubs referencing fileDescriptorGenerated are removed and replaced by stubs referencing fileDescriptor_83c10c24ec417dc9, together with new XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size, and XXX_DiscardUnknown methods and an xxx_messageInfo_<Type> proto.InternalMessageInfo variable per type.]
(m *ServiceSpec) Reset() { *m = ServiceSpec{} } -func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo -func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } -func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{84} +} +func (m *NamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceList.Merge(m, src) +} +func (m *NamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NamespaceList) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceList.DiscardUnknown(m) +} -func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } -func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +var xxx_messageInfo_NamespaceList proto.InternalMessageInfo -func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } -func (*StorageOSPersistentVolumeSource) ProtoMessage() {} -func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{180} +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{85} +} +func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceSpec.Merge(m, src) +} +func (m *NamespaceSpec) XXX_Size() int { + return m.Size() +} +func (m *NamespaceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceSpec.DiscardUnknown(m) } -func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } -func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo -func (m *Sysctl) Reset() { *m = Sysctl{} } -func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{86} +} +func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceStatus) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_NamespaceStatus.Merge(m, src) +} +func (m *NamespaceStatus) XXX_Size() int { + return m.Size() +} +func (m *NamespaceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceStatus.DiscardUnknown(m) +} -func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } -func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo -func (m *Taint) Reset() { *m = Taint{} } -func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{87} +} +func (m *Node) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return m.Size() +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} -func (m *Toleration) Reset() { *m = Toleration{} } -func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } +var xxx_messageInfo_Node proto.InternalMessageInfo -func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } -func (*TopologySelectorLabelRequirement) ProtoMessage() {} -func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{88} +} +func (m *NodeAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAddress.Merge(m, src) +} +func (m *NodeAddress) XXX_Size() int { + return m.Size() +} +func (m *NodeAddress) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAddress.DiscardUnknown(m) } -func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } -func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } +var xxx_messageInfo_NodeAddress proto.InternalMessageInfo -func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } -func (*TypedLocalObjectReference) ProtoMessage() {} -func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{188} +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{89} +} +func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAffinity.Merge(m, src) +} +func (m *NodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *NodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAffinity.DiscardUnknown(m) } -func (m *Volume) Reset() { *m = Volume{} } -func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo -func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } -func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{90} +} +func (m *NodeCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeCondition.Merge(m, src) +} +func (m *NodeCondition) XXX_Size() int { + return m.Size() +} +func (m *NodeCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NodeCondition.DiscardUnknown(m) +} -func (m *VolumeMount) Reset() { *m = VolumeMount{} } -func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +var xxx_messageInfo_NodeCondition proto.InternalMessageInfo -func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } -func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{91} +} +func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigSource.Merge(m, src) +} +func (m *NodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigSource.DiscardUnknown(m) +} -func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } -func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} } +var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo -func (m *VolumeSource) Reset() { *m = VolumeSource{} } -func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{194} } +func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } +func (*NodeConfigStatus) ProtoMessage() {} +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{92} +} +func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigStatus.Merge(m, src) +} +func (m *NodeConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m) +} -func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } -func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} -func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{195} +var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{93} +} +func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src) +} +func (m *NodeDaemonEndpoints) XXX_Size() int { + return m.Size() +} +func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { + xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m) } -func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } -func (*WeightedPodAffinityTerm) ProtoMessage() {} -func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{196} +var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo + +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{94} +} +func (m *NodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeList.Merge(m, src) +} +func (m *NodeList) XXX_Size() int { + return m.Size() +} +func (m *NodeList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeList.DiscardUnknown(m) } -func init() { - proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") - proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") - proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") - proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") - proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") - proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") - proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") - proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") - proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") - proto.RegisterType((*CSIVolumeSource)(nil), 
"k8s.io.api.core.v1.CSIVolumeSource") - proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") - proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") - proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") - proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") - proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") - proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") - proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") - proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") - proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") - proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") - proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") - proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") - proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") - proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") - proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") - proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") - proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") - proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") - proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") - proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") - proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") - proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") - proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") - proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") - proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") - proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") - proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") - proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") - proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") - proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") - proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") - proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") - proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") - proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") - proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") - proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") - proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") - proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") - proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") - proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") - proto.RegisterType((*EventSource)(nil), 
"k8s.io.api.core.v1.EventSource") - proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") - proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") - proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") - proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") - proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") - proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") - proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") - proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") - proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") - proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") - proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") - proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") - proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") - proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") - proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") - proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") - proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") - proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") - proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") - proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") - proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") - proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") - proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") - proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") - proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") - proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") - proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") - proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") - proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") - proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") - proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") - proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") - proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") - proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") - proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") - proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") - proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") - proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") - proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") - proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), 
"k8s.io.api.core.v1.NodeResources") - proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") - proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") - proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") - proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") - proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") - proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") - proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") - proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") - proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") - proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") - proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") - proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") - proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") - proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") - proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") - proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") - proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") - proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") - proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") - proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") - proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") - proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") - proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") - proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") - proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") - proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") - proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") - proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") - proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") - proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") - proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") - proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") - proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") - proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") - proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") - proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") - proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") - proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") - proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") - 
proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") - proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") - proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") - proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") - proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") - proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") - proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") - proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") - proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") - proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") - proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") - proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") - proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") - proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") - proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") - proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") - proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") - proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") - proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") - proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") - proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") - proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") - proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") - proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") - proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") - proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") - proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") - proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") - proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") - proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") - proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") - proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") - proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") - proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") - proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") - proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") - proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") - proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") - proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") - proto.RegisterType((*ServiceAccountList)(nil), 
"k8s.io.api.core.v1.ServiceAccountList") - proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") - proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") - proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") - proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") - proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") - proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") - proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") - proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") - proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") - proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") - proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") - proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") - proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") - proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") - proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") - proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") - proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") - proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") - proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") - proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") - proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") - proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") - proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") - proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") +var xxx_messageInfo_NodeList proto.InternalMessageInfo + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{95} } -func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeProxyOptions.Merge(m, src) +} +func (m *NodeProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *NodeProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m) } -func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{96} +} +func (m *NodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *NodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResources.Merge(m, src) +} +func (m *NodeResources) XXX_Size() int { + return m.Size() +} +func (m *NodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResources.DiscardUnknown(m) } -func (m *Affinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeResources proto.InternalMessageInfo + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{97} +} +func (m *NodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelector.Merge(m, src) +} +func (m *NodeSelector) XXX_Size() int { + return m.Size() +} +func (m *NodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelector.DiscardUnknown(m) } -func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.NodeAffinity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n1, err := m.NodeAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PodAffinity != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinity.Size())) - n2, err := m.PodAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.PodAntiAffinity != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAntiAffinity.Size())) - n3, err := m.PodAntiAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 +var xxx_messageInfo_NodeSelector proto.InternalMessageInfo + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{98} +} +func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorRequirement.Merge(m, src) +} +func (m *NodeSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m 
*NodeSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m) } -func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{99} +} +func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += copy(dAtA[i:], m.DevicePath) - return i, nil +func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorTerm.Merge(m, src) +} +func (m *NodeSelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m) } -func (m *AvoidPods) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{100} +} +func (m *NodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSpec.Merge(m, src) +} +func (m *NodeSpec) XXX_Size() int { + return m.Size() +} +func (m *NodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSpec.DiscardUnknown(m) } -func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, msg := range m.PreferAvoidPods { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_NodeSpec proto.InternalMessageInfo + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{101} +} +func (m *NodeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStatus.Merge(m, src) +} +func (m *NodeStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_NodeStatus.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeStatus proto.InternalMessageInfo + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{102} +} +func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSystemInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSystemInfo.Merge(m, src) +} +func (m *NodeSystemInfo) XXX_Size() int { + return m.Size() +} +func (m *NodeSystemInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) - i += copy(dAtA[i:], m.DiskName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) - i += copy(dAtA[i:], m.DataDiskURI) - if m.CachingMode != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) - i += copy(dAtA[i:], *m.CachingMode) - } - if m.FSType != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - if m.ReadOnly != nil { - dAtA[i] = 0x28 - i++ - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Kind != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) +var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{103} +} +func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectFieldSelector.Merge(m, src) +} +func (m *ObjectFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ObjectFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{104} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { 
return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretNamespace != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) - i += copy(dAtA[i:], *m.SecretNamespace) +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{105} +} +func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolume.Merge(m, src) +} +func (m *PersistentVolume) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolume) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolume.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{106} +} +func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaim.Merge(m, src) +} +func (m *PersistentVolumeClaim) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaim) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func 
(*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{107} +} +func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src) +} +func (m *PersistentVolumeClaimCondition) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m) } -func (m *Binding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{108} +} +func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src) +} +func (m *PersistentVolumeClaimList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m) } -func (m *Binding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{109} +} +func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n5, err := m.Target.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src) +} +func (m *PersistentVolumeClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) 
ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{110} +} +func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n5 - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src) +} +func (m *PersistentVolumeClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{111} +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) - i += copy(dAtA[i:], m.VolumeHandle) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for _, k := range keysForVolumeAttributes { - dAtA[i] = 0x2a - i++ - v := m.VolumeAttributes[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ControllerPublishSecretRef != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerPublishSecretRef.Size())) - n6, err := 
m.ControllerPublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.NodeStageSecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeStageSecretRef.Size())) - n7, err := m.NodeStageSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.NodePublishSecretRef != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n8, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 +var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{112} +} +func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeList.Merge(m, src) +} +func (m *PersistentVolumeList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m) } -func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{113} +} +func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSource.DiscardUnknown(m) } -func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - if m.ReadOnly != nil { - dAtA[i] = 0x10 - i++ - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.FSType != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for _, k := range keysForVolumeAttributes { - dAtA[i] = 0x22 - i++ - v := m.VolumeAttributes[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) 
+ sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.NodePublishSecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n9, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} +var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo -func (m *Capabilities) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{114} +} +func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil +func (m *PersistentVolumeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSpec.Merge(m, src) +} +func (m *PersistentVolumeSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSpec.DiscardUnknown(m) } -func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{115} +} +func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeStatus.Merge(m, src) +} +func (m *PersistentVolumeStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeStatus.DiscardUnknown(m) } -func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n10, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo + +func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{116} +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.Merge(m, src) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PhotonPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{117} +} +func (m *Pod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Pod) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pod.Merge(m, src) +} +func (m *Pod) XXX_Size() int { + return m.Size() +} +func (m *Pod) XXX_DiscardUnknown() { + xxx_messageInfo_Pod.DiscardUnknown(m) } -func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n11, err := 
m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_Pod proto.InternalMessageInfo + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{118} +} +func (m *PodAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *PodAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinity.Merge(m, src) +} +func (m *PodAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinity.DiscardUnknown(m) } -func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAffinity proto.InternalMessageInfo + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{119} +} +func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinityTerm.Merge(m, src) +} +func (m *PodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *PodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinityTerm.DiscardUnknown(m) } -func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n12, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 +var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{120} +} +func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodAntiAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAntiAffinity.Merge(m, src) +} +func (m *PodAntiAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAntiAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAntiAffinity.DiscardUnknown(m) } -func (m *CinderVolumeSource) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{121} +} +func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAttachOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodAttachOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAttachOptions.Merge(m, src) +} +func (m *PodAttachOptions) XXX_Size() int { + return m.Size() +} +func (m *PodAttachOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodAttachOptions.DiscardUnknown(m) } -func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n13, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 +var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{122} +} +func (m *PodCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCondition.Merge(m, src) +} +func (m *PodCondition) XXX_Size() int { + return m.Size() +} +func (m *PodCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PodCondition.DiscardUnknown(m) } -func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodCondition proto.InternalMessageInfo + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{123} +} +func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodDNSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfig.Merge(m, src) +} +func (m *PodDNSConfig) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfig.DiscardUnknown(m) } -func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TimeoutSeconds 
!= nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) +var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{124} +} +func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfigOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodDNSConfigOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfigOption.Merge(m, src) +} +func (m *PodDNSConfigOption) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfigOption) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfigOption.DiscardUnknown(m) } -func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{125} +} +func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodExecOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil +func (m *PodExecOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodExecOptions.Merge(m, src) +} +func (m *PodExecOptions) XXX_Size() int { + return m.Size() +} +func (m *PodExecOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodExecOptions.DiscardUnknown(m) } -func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo + +func (m *PodIP) Reset() { *m = PodIP{} } +func (*PodIP) ProtoMessage() {} +func (*PodIP) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{126} +} +func (m *PodIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodIP.Merge(m, src) +} +func (m *PodIP) XXX_Size() int { + return m.Size() +} +func (m *PodIP) XXX_DiscardUnknown() { + xxx_messageInfo_PodIP.DiscardUnknown(m) } -func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = 
i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n14, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodIP proto.InternalMessageInfo + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{127} +} +func (m *PodList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n14 - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodList.Merge(m, src) +} +func (m *PodList) XXX_Size() int { + return m.Size() +} +func (m *PodList) XXX_DiscardUnknown() { + xxx_messageInfo_PodList.DiscardUnknown(m) } -func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodList proto.InternalMessageInfo + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{128} +} +func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodLogOptions.Merge(m, src) +} +func (m *PodLogOptions) XXX_Size() int { + return m.Size() +} +func (m *PodLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodLogOptions.DiscardUnknown(m) } -func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n15, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{129} +} +func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPortForwardOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n15 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodPortForwardOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPortForwardOptions.Merge(m, src) +} +func (m *PodPortForwardOptions) XXX_Size() int { + return m.Size() +} +func (m *PodPortForwardOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodPortForwardOptions.DiscardUnknown(m) } -func (m *ConfigMap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{130} +} +func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodProxyOptions.Merge(m, src) +} +func (m *PodProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *PodProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodProxyOptions.DiscardUnknown(m) } -func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n16, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo + +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{131} +} +func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n16 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.BinaryData) > 0 { - keysForBinaryData := make([]string, 0, len(m.BinaryData)) - for k := range m.BinaryData { - keysForBinaryData = append(keysForBinaryData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - for _, k := range keysForBinaryData { - dAtA[i] = 0x1a - i++ - v := m.BinaryData[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodReadinessGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodReadinessGate.Merge(m, src) +} +func (m *PodReadinessGate) XXX_Size() int { + return m.Size() +} +func (m *PodReadinessGate) 
XXX_DiscardUnknown() { + xxx_messageInfo_PodReadinessGate.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{132} +} +func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityContext.Merge(m, src) +} +func (m *PodSecurityContext) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityContext.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{133} +} +func (m *PodSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n17 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSignature.Merge(m, src) +} +func (m *PodSignature) XXX_Size() int { + return m.Size() +} +func (m *PodSignature) XXX_DiscardUnknown() { + xxx_messageInfo_PodSignature.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodSignature proto.InternalMessageInfo + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{134} +} +func (m *PodSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSpec.Merge(m, src) +} +func (m *PodSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSpec.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) 
+var xxx_messageInfo_PodSpec proto.InternalMessageInfo + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{135} +} +func (m *PodStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatus.Merge(m, src) +} +func (m *PodStatus) XXX_Size() int { + return m.Size() +} +func (m *PodStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatus.DiscardUnknown(m) } -func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodStatus proto.InternalMessageInfo + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{136} +} +func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatusResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodStatusResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatusResult.Merge(m, src) +} +func (m *PodStatusResult) XXX_Size() int { + return m.Size() +} +func (m *PodStatusResult) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatusResult.DiscardUnknown(m) } -func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n19, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{137} +} +func (m *PodTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n19 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplate.Merge(m, src) +} +func (m *PodTemplate) XXX_Size() int { + return m.Size() +} +func (m *PodTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplate.DiscardUnknown(m) } -func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodTemplate 
proto.InternalMessageInfo + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{138} +} +func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) - i += copy(dAtA[i:], m.KubeletConfigKey) - return i, nil +func (m *PodTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateList.Merge(m, src) +} +func (m *PodTemplateList) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateList.DiscardUnknown(m) } -func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo + +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{139} +} +func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateSpec.Merge(m, src) +} +func (m *PodTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateSpec.DiscardUnknown(m) } -func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{140} +} +func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortworxVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n20 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PortworxVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortworxVolumeSource.Merge(m, src) +} +func (m *PortworxVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PortworxVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PortworxVolumeSource.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{141} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Preconditions proto.InternalMessageInfo + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferAvoidPodsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n21 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PreferAvoidPodsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferAvoidPodsEntry.Merge(m, src) +} +func (m *PreferAvoidPodsEntry) XXX_Size() int { + return m.Size() +} +func (m *PreferAvoidPodsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_PreferAvoidPodsEntry.DiscardUnknown(m) } -func (m *Container) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func 
(*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{143} +} +func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferredSchedulingTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PreferredSchedulingTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferredSchedulingTerm.Merge(m, src) +} +func (m *PreferredSchedulingTerm) XXX_Size() int { + return m.Size() +} +func (m *PreferredSchedulingTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PreferredSchedulingTerm.DiscardUnknown(m) } -func (m *Container) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) - i += copy(dAtA[i:], m.WorkingDir) - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n22, err := m.Resources.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{144} +} +func (m *Probe) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n22 - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.LivenessProbe != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n23, err := m.LivenessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.ReadinessProbe != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n24, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.Lifecycle != nil { - dAtA[i] = 0x62 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n25, err := m.Lifecycle.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) - i += copy(dAtA[i:], m.TerminationMessagePath) - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) - i += copy(dAtA[i:], m.ImagePullPolicy) - if m.SecurityContext != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n26, err := m.SecurityContext.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if m.StdinOnce { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) - i += copy(dAtA[i:], m.TerminationMessagePolicy) - if len(m.VolumeDevices) > 0 { - for _, msg := range m.VolumeDevices { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Probe) XXX_Merge(src proto.Message) { + xxx_messageInfo_Probe.Merge(m, src) +} +func (m *Probe) XXX_Size() int { + return m.Size() +} +func (m *Probe) XXX_DiscardUnknown() { + xxx_messageInfo_Probe.DiscardUnknown(m) } -func (m *ContainerImage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Probe proto.InternalMessageInfo + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{145} +} +func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectedVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) - return i, nil +func (m *ProjectedVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectedVolumeSource.Merge(m, src) +} +func (m *ProjectedVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ProjectedVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectedVolumeSource.DiscardUnknown(m) } -func (m *ContainerPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA 
= make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{146} +} +func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuobyteVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - return i, nil +func (m *QuobyteVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuobyteVolumeSource.Merge(m, src) +} +func (m *QuobyteVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *QuobyteVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_QuobyteVolumeSource.DiscardUnknown(m) } -func (m *ContainerState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{147} +} +func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Waiting != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n27, err := m.Waiting.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - if m.Running != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n28, err := m.Running.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - if m.Terminated != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n29, err := m.Terminated.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - return i, nil +func (m *RBDPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDPersistentVolumeSource.Merge(m, src) +} +func (m *RBDPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDPersistentVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{148} +} +func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *RBDVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDVolumeSource.Merge(m, src) +} +func (m *RBDVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n30, err := m.StartedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{149} +} +func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n30 - return i, nil + return b[:n], nil +} +func (m *RangeAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocation.Merge(m, src) +} +func (m *RangeAllocation) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocation.DiscardUnknown(m) } -func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{150} +} +func (m *ReplicationController) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationController) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationController.Merge(m, src) +} +func (m *ReplicationController) XXX_Size() int { + return m.Size() +} +func (m *ReplicationController) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationController.DiscardUnknown(m) } -func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - 
dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n31, err := m.StartedAt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) - n32, err := m.FinishedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicationController proto.InternalMessageInfo + +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{151} +} +func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n32 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *ReplicationControllerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerCondition.Merge(m, src) +} +func (m *ReplicationControllerCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerCondition.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{152} +} +func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil +func (m *ReplicationControllerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerList.Merge(m, src) +} +func (m *ReplicationControllerList) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerList.DiscardUnknown(m) } -func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{153} +} +func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationControllerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerSpec.Merge(m, src) +} +func (m *ReplicationControllerSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerSpec.DiscardUnknown(m) } -func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) - n33, err := m.State.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() {} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{154} +} +func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n33 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) - n34, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ReplicationControllerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerStatus.Merge(m, src) +} +func (m *ReplicationControllerStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{155} +} +func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n34 - dAtA[i] = 0x20 - i++ - if m.Ready { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return nil, err } - i++ - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) - i += copy(dAtA[i:], m.ImageID) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *ResourceFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceFieldSelector.Merge(m, src) +} +func (m 
*ResourceFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ResourceFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceFieldSelector.DiscardUnknown(m) } -func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{156} +} +func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - return i, nil +func (m *ResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuota.Merge(m, src) +} +func (m *ResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuota.DiscardUnknown(m) } -func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{157} +} +func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaList.Merge(m, src) +} +func (m *ResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaList.DiscardUnknown(m) } -func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{158} +} +func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaSpec.Merge(m, src) +} +func (m *ResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaSpec) 
XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaSpec.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{159} +} +func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatus.Merge(m, src) +} +func (m *ResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatus.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n35, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n36, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if m.Mode != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) +var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{160} +} +func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ResourceRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRequirements.Merge(m, src) +} +func (m *ResourceRequirements) XXX_Size() int { + return m.Size() +} +func (m *ResourceRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRequirements.DiscardUnknown(m) } -func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{161} +} +func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return 
b[:n], nil } - -func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil +func (m *SELinuxOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxOptions.Merge(m, src) +} +func (m *SELinuxOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxOptions.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo + +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{162} +} +func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) - i += copy(dAtA[i:], m.Medium) - if m.SizeLimit != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n37, err := m.SizeLimit.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - return i, nil +func (m *ScaleIOPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOPersistentVolumeSource.Merge(m, src) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOPersistentVolumeSource.DiscardUnknown(m) } -func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{163} +} +func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if m.TargetRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n38, err := m.TargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err 
- } - i += n38 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - if m.NodeName != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) - i += copy(dAtA[i:], *m.NodeName) - } - return i, nil +func (m *ScaleIOVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOVolumeSource.Merge(m, src) +} +func (m *ScaleIOVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOVolumeSource.DiscardUnknown(m) } -func (m *EndpointPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo + +func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } +func (*ScopeSelector) ProtoMessage() {} +func (*ScopeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{164} +} +func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - return i, nil +func (m *ScopeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeSelector.Merge(m, src) +} +func (m *ScopeSelector) XXX_Size() int { + return m.Size() +} +func (m *ScopeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeSelector.DiscardUnknown(m) } -func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo + +func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } +func (*ScopedResourceSelectorRequirement) ProtoMessage() {} +func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{165} +} +func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopedResourceSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NotReadyAddresses) > 0 { - for _, msg := range m.NotReadyAddresses { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src) +} +func (m *ScopedResourceSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m) } -func (m *Endpoints) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{166} +} +func (m *Secret) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Secret) XXX_Merge(src proto.Message) { + xxx_messageInfo_Secret.Merge(m, src) +} +func (m *Secret) XXX_Size() int { + return m.Size() +} +func (m *Secret) XXX_DiscardUnknown() { + xxx_messageInfo_Secret.DiscardUnknown(m) } -func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n39, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Secret proto.InternalMessageInfo + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{167} +} +func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n39 - if len(m.Subsets) > 0 { - for _, msg := range m.Subsets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretEnvSource.Merge(m, src) +} +func (m *SecretEnvSource) XXX_Size() int { + return m.Size() +} +func (m *SecretEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretEnvSource.DiscardUnknown(m) } -func (m *EndpointsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{168} +} +func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretKeySelector) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_SecretKeySelector.Merge(m, src) +} +func (m *SecretKeySelector) XXX_Size() int { + return m.Size() +} +func (m *SecretKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_SecretKeySelector.DiscardUnknown(m) } -func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n40, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{169} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n40 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + xxx_messageInfo_SecretList.DiscardUnknown(m) } -func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{170} +} +func (m *SecretProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretProjection.Merge(m, src) +} +func (m *SecretProjection) XXX_Size() int { + return m.Size() +} +func (m *SecretProjection) XXX_DiscardUnknown() { + xxx_messageInfo_SecretProjection.DiscardUnknown(m) } -func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i += copy(dAtA[i:], m.Prefix) - if m.ConfigMapRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n41, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - } - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n42, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 +var xxx_messageInfo_SecretProjection proto.InternalMessageInfo + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{171} +} +func (m *SecretReference) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretReference.Merge(m, src) +} +func (m *SecretReference) XXX_Size() int { + return m.Size() +} +func (m *SecretReference) XXX_DiscardUnknown() { + xxx_messageInfo_SecretReference.DiscardUnknown(m) } -func (m *EnvVar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretReference proto.InternalMessageInfo + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{172} +} +func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretVolumeSource.Merge(m, src) +} +func (m *SecretVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *SecretVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m) } -func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - if m.ValueFrom != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n43, err := m.ValueFrom.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 +var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{173} +} +func (m *SecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContext.Merge(m, src) +} +func (m *SecurityContext) XXX_Size() int { + return m.Size() +} +func (m *SecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContext.DiscardUnknown(m) } -func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecurityContext proto.InternalMessageInfo + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{174} +} +func (m *SerializedReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SerializedReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializedReference.Merge(m, src) +} +func (m *SerializedReference) XXX_Size() int { + return m.Size() +} +func (m *SerializedReference) XXX_DiscardUnknown() { + xxx_messageInfo_SerializedReference.DiscardUnknown(m) } -func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.FieldRef != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n44, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n45, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - } - if m.ConfigMapKeyRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n46, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - } - if m.SecretKeyRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n47, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 +var xxx_messageInfo_SerializedReference proto.InternalMessageInfo + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{175} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return m.Size() +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) } -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{176} +} +func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServiceAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccount.Merge(m, src) +} +func (m *ServiceAccount) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccount) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccount.DiscardUnknown(m) } -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo + +func (m *ServiceAccountList) Reset() { *m = 
ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{177} +} +func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n48 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n49, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountList.Merge(m, src) +} +func (m *ServiceAccountList) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo + +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{178} +} +func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n49 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n50, err := m.Source.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src) +} +func (m *ServiceAccountTokenProjection) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountTokenProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{179} +} +func (m *ServiceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n50 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n51, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceList.Merge(m, src) +} +func (m *ServiceList) XXX_Size() int { + return m.Size() +} +func (m *ServiceList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceList proto.InternalMessageInfo + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{180} +} +func (m *ServicePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n51 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n52, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServicePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServicePort.Merge(m, src) +} +func (m *ServicePort) XXX_Size() int { + return m.Size() +} +func (m *ServicePort) XXX_DiscardUnknown() { + xxx_messageInfo_ServicePort.DiscardUnknown(m) +} + +var xxx_messageInfo_ServicePort proto.InternalMessageInfo + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{181} +} +func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n52 - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n53, err := m.EventTime.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceProxyOptions.Merge(m, src) +} +func (m *ServiceProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *ServiceProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{182} +} +func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n53 - if m.Series != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n54, err := m.Series.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n54 - } - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - if m.Related != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n55, err := m.Related.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 + return nil, err } - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - return i, nil + return b[:n], nil +} +func (m *ServiceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceSpec.Merge(m, src) +} +func (m *ServiceSpec) XXX_Size() int { + return m.Size() +} 
+func (m *ServiceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceSpec.DiscardUnknown(m) } -func (m *EventList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{183} +} +func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServiceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceStatus.Merge(m, src) +} +func (m *ServiceStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceStatus.DiscardUnknown(m) } -func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n56, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo + +func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{184} +} +func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n56 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionAffinityConfig.Merge(m, src) +} +func (m *SessionAffinityConfig) XXX_Size() int { + return m.Size() +} +func (m *SessionAffinityConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m) } -func (m *EventSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{185} +} +func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src) +} +func (m *StorageOSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m 
*StorageOSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m) } -func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n57, err := m.LastObservedTime.MarshalTo(dAtA[i:]) +var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{186} +} +func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n57 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + return b[:n], nil +} +func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSVolumeSource.Merge(m, src) +} +func (m *StorageOSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *StorageOSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m) } -func (m *EventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{187} +} +func (m *Sysctl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) - i += copy(dAtA[i:], m.Component) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil +func (m *Sysctl) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sysctl.Merge(m, src) +} +func (m *Sysctl) XXX_Size() int { + return m.Size() +} +func (m *Sysctl) XXX_DiscardUnknown() { + xxx_messageInfo_Sysctl.DiscardUnknown(m) } -func (m *ExecAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Sysctl proto.InternalMessageInfo + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{188} +} +func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *TCPSocketAction) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPSocketAction.Merge(m, src) +} +func (m *TCPSocketAction) XXX_Size() int { + return m.Size() +} +func (m *TCPSocketAction) XXX_DiscardUnknown() { + xxx_messageInfo_TCPSocketAction.DiscardUnknown(m) } -func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } +var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{189} +} +func (m *Taint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Taint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Taint.Merge(m, src) +} +func (m *Taint) XXX_Size() int { + return m.Size() +} +func (m *Taint) XXX_DiscardUnknown() { + xxx_messageInfo_Taint.DiscardUnknown(m) } -func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Taint proto.InternalMessageInfo + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{190} +} +func (m *Toleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Lun != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil +func (m *Toleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Toleration.Merge(m, src) } - -func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *Toleration) XXX_Size() int { + return m.Size() } - -func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n58, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n58 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil +func (m *Toleration) XXX_DiscardUnknown() { + xxx_messageInfo_Toleration.DiscardUnknown(m) } -func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +var xxx_messageInfo_Toleration proto.InternalMessageInfo -func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n59, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil +func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } +func (*TopologySelectorLabelRequirement) ProtoMessage() {} +func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{191} } - -func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m 
*FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) - i += copy(dAtA[i:], m.DatasetName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) - i += copy(dAtA[i:], m.DatasetUUID) - return i, nil +func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src) } - -func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *TopologySelectorLabelRequirement) XXX_Size() int { + return m.Size() } - -func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) - i += copy(dAtA[i:], m.PDName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo + +func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } +func (*TopologySelectorTerm) ProtoMessage() {} +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{192} +} +func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) - i += copy(dAtA[i:], m.Repository) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) - i += copy(dAtA[i:], m.Revision) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) - i += copy(dAtA[i:], m.Directory) - return i, nil +func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorTerm.Merge(m, src) +} +func (m *TopologySelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m) } -func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo + +func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } +func (*TopologySpreadConstraint) ProtoMessage() {} +func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{193} +} +func (m 
*TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.EndpointsNamespace != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) - i += copy(dAtA[i:], *m.EndpointsNamespace) - } - return i, nil +func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySpreadConstraint.Merge(m, src) +} +func (m *TopologySpreadConstraint) XXX_Size() int { + return m.Size() +} +func (m *TopologySpreadConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m) } -func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo + +func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } +func (*TypedLocalObjectReference) ProtoMessage() {} +func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{194} +} +func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedLocalObjectReference.Merge(m, src) +} +func (m *TypedLocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m) } -func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{195} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return 
b[:n], nil } - -func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n60, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n60 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) - i += copy(dAtA[i:], m.Scheme) - if len(m.HTTPHeaders) > 0 { - for _, msg := range m.HTTPHeaders { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) } -func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{196} +} +func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil +func (m *VolumeDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeDevice.Merge(m, src) +} +func (m *VolumeDevice) XXX_Size() int { + return m.Size() +} +func (m *VolumeDevice) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeDevice.DiscardUnknown(m) } -func (m *Handler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{197} +} +func (m *VolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Exec != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n61, err := m.Exec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n61 - } - if m.HTTPGet != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n62, err := 
m.HTTPGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - } - if m.TCPSocket != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n63, err := m.TCPSocket.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 - } - return i, nil +func (m *VolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMount.Merge(m, src) +} +func (m *VolumeMount) XXX_Size() int { + return m.Size() +} +func (m *VolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMount.DiscardUnknown(m) } -func (m *HostAlias) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeMount proto.InternalMessageInfo + +func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } +func (*VolumeNodeAffinity) ProtoMessage() {} +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{198} +} +func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil +func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeAffinity.Merge(m, src) +} +func (m *VolumeNodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m) } -func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{199} +} +func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Type != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - return i, nil +func (m *VolumeProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeProjection.Merge(m, src) +} +func (m *VolumeProjection) XXX_Size() int { + return m.Size() +} +func (m *VolumeProjection) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeProjection.DiscardUnknown(m) } -func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{200} +} +func (m *VolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n64, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil +func (m *VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeSource.Merge(m, src) +} +func (m *VolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeSource.DiscardUnknown(m) } -func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeSource proto.InternalMessageInfo + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{201} +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] 
= 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n65, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n65 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil +func (m *VsphereVirtualDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.Merge(m, src) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VsphereVirtualDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.DiscardUnknown(m) } -func (m *KeyToPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{202} +} +func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WeightedPodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Mode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - } - return i, nil +func (m *WeightedPodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_WeightedPodAffinityTerm.Merge(m, src) +} +func (m *WeightedPodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *WeightedPodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_WeightedPodAffinityTerm.DiscardUnknown(m) } -func (m *Lifecycle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo + +func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } +func 
(*WindowsSecurityContextOptions) ProtoMessage() {} +func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{203} +} +func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsSecurityContextOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *WindowsSecurityContextOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsSecurityContextOptions.Merge(m, src) +} +func (m *WindowsSecurityContextOptions) XXX_Size() int { + return m.Size() +} +func (m *WindowsSecurityContextOptions) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsSecurityContextOptions.DiscardUnknown(m) } -func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PostStart != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n66, err := m.PostStart.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n66 - } - if m.PreStop != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n67, err := m.PreStop.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n67 - } - return i, nil +var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + 
proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.ConfigMap.BinaryDataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ConfigMap.DataEntry") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer") + proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon") + proto.RegisterType((*EphemeralContainers)(nil), "k8s.io.api.core.v1.EphemeralContainers") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry") + proto.RegisterType((*FlexVolumeSource)(nil), 
"k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultRequestEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxLimitRequestRatioEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeConfigStatus)(nil), 
"k8s.io.api.core.v1.NodeConfigStatus") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeResources.CapacityEntry") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec.CapacityEntry") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") + 
proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.PodSpec.NodeSelectorEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PodSpec.OverheadEntry") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec.HardEntry") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.HardEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.UsedEntry") + proto.RegisterType((*ResourceRequirements)(nil), 
"k8s.io.api.core.v1.ResourceRequirements") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") + proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.Secret.DataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.Secret.StringDataEntry") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") + proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") + proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint") + proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") + 
proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") + proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions") } -func (m *LimitRange) Marshal() (dAtA []byte, err error) { +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) +} + +var fileDescriptor_83c10c24ec417dc9 = []byte{ + // 13567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7b, 0x70, 0x24, 0x49, + 0x5a, 0x18, 0x7e, 0xd5, 0xad, 0x47, 0xf7, 0xa7, 0x77, 0xce, 0x63, 0x35, 0xda, 0x9d, 0xd1, 0x6c, + 0xed, 0xdd, 0xec, 0xec, 0xed, 0xae, 0xe6, 0xf6, 0x75, 0xbb, 0xdc, 0xde, 0x2d, 0x48, 0x6a, 0x69, + 0xa6, 0x77, 0x46, 0x9a, 0xde, 0x6c, 0xcd, 0xcc, 0xdd, 0xb2, 0x77, 0x5c, 0xa9, 0x2b, 0x25, 0xd5, + 0xaa, 0xbb, 0xaa, 0xb7, 0xaa, 0x5a, 0x33, 0xda, 0x1f, 0xc4, 0x0f, 0x1f, 0xcf, 0x33, 0xe0, 0xb8, + 0xb0, 0x09, 0x3f, 0x80, 0xc0, 0x11, 0x18, 0x07, 0x60, 0xb0, 0xc3, 0x18, 0x0c, 0x98, 0xc3, 0x36, + 0x06, 0xdb, 0x81, 0xfd, 0x07, 0xc6, 0x0e, 0xdb, 0x47, 0x04, 0x61, 0x19, 0x06, 0x87, 0x89, 0xfb, + 0xc3, 0x40, 0x18, 0xfc, 0x87, 0x65, 0xc2, 0x38, 0xf2, 0x59, 0x99, 0xd5, 0x55, 0xdd, 0xad, 0x59, + 0x8d, 0x6e, 0xb9, 0xd8, 0xff, 0xba, 0xf3, 0xfb, 0xf2, 0xcb, 0xac, 0x7c, 0x7e, 0xf9, 0x3d, 0xe1, + 0xd5, 0xdd, 0x57, 0xa2, 0x05, 0x2f, 0xb8, 0xb2, 0xdb, 0xd9, 0x24, 0xa1, 0x4f, 0x62, 0x12, 0x5d, + 0xd9, 0x23, 0xbe, 0x1b, 0x84, 0x57, 0x04, 0xc0, 0x69, 0x7b, 0x57, 0x1a, 0x41, 0x48, 0xae, 0xec, + 0x3d, 0x77, 0x65, 0x9b, 0xf8, 0x24, 0x74, 0x62, 0xe2, 0x2e, 0xb4, 0xc3, 0x20, 0x0e, 0x10, 0xe2, + 0x38, 0x0b, 0x4e, 0xdb, 0x5b, 0xa0, 0x38, 0x0b, 0x7b, 0xcf, 0xcd, 0x3d, 0xbb, 0xed, 0xc5, 0x3b, + 0x9d, 0xcd, 0x85, 0x46, 0xd0, 0xba, 0xb2, 0x1d, 0x6c, 0x07, 0x57, 0x18, 0xea, 0x66, 0x67, 0x8b, + 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0xee, 0xc5, 0xa4, 0x99, 0x96, 0xd3, 0xd8, 0xf1, 0x7c, + 0x12, 0xee, 0x5f, 0x69, 0xef, 0x6e, 0xb3, 0x76, 0x43, 0x12, 0x05, 0x9d, 0xb0, 0x41, 0xd2, 0x0d, + 0xf7, 0xac, 0x15, 0x5d, 0x69, 0x91, 0xd8, 0xc9, 0xe8, 0xee, 0xdc, 0x95, 0xbc, 0x5a, 0x61, 0xc7, + 0x8f, 0xbd, 0x56, 0x77, 0x33, 0x1f, 0xef, 0x57, 0x21, 0x6a, 0xec, 0x90, 0x96, 0xd3, 0x55, 0xef, + 0x85, 0xbc, 0x7a, 0x9d, 0xd8, 0x6b, 0x5e, 0xf1, 0xfc, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x2b, + 0x16, 0x5c, 0x5c, 0xbc, 0x53, 0x5f, 0x69, 0x3a, 0x51, 0xec, 0x35, 0x96, 0x9a, 0x41, 0x63, 0xb7, + 0x1e, 0x07, 0x21, 0xb9, 0x1d, 0x34, 0x3b, 0x2d, 0x52, 0x67, 0x03, 0x81, 0x9e, 0x81, 0xd2, 0x1e, + 0xfb, 0x5f, 0xad, 0xcc, 0x5a, 0x17, 0xad, 0xcb, 0xe5, 0xa5, 0xe9, 0xdf, 0x38, 0x98, 0xff, 0xd0, + 0xfd, 0x83, 0xf9, 0xd2, 0x6d, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x04, 0x23, 0x5b, 0xd1, 0xc6, 0x7e, + 0x9b, 0xcc, 0x16, 0x18, 0xee, 0xa4, 0xc0, 0x1d, 0x59, 0xad, 0xd3, 0x52, 0x2c, 0xa0, 0xe8, 0x0a, + 0x94, 0xdb, 0x4e, 0x18, 0x7b, 0xb1, 0x17, 0xf8, 0xb3, 0xc5, 0x8b, 0xd6, 0xe5, 0xe1, 0xa5, 0x19, + 0x81, 
0x5a, 0xae, 0x49, 0x00, 0x4e, 0x70, 0x68, 0x37, 0x42, 0xe2, 0xb8, 0x37, 0xfd, 0xe6, 0xfe, + 0xec, 0xd0, 0x45, 0xeb, 0x72, 0x29, 0xe9, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0x3f, 0x54, 0x80, + 0xd2, 0xe2, 0xd6, 0x96, 0xe7, 0x7b, 0xf1, 0x3e, 0xba, 0x0d, 0xe3, 0x7e, 0xe0, 0x12, 0xf9, 0x9f, + 0x7d, 0xc5, 0xd8, 0xf3, 0x17, 0x17, 0xba, 0x97, 0xd2, 0xc2, 0xba, 0x86, 0xb7, 0x34, 0x7d, 0xff, + 0x60, 0x7e, 0x5c, 0x2f, 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xac, 0x1d, 0xb8, 0x8a, 0x6c, 0x81, 0x91, + 0x9d, 0xcf, 0x22, 0x5b, 0x4b, 0xd0, 0x96, 0xa6, 0xee, 0x1f, 0xcc, 0x8f, 0x69, 0x05, 0x58, 0x27, + 0x82, 0x36, 0x61, 0x8a, 0xfe, 0xf5, 0x63, 0x4f, 0xd1, 0x2d, 0x32, 0xba, 0x4f, 0xe4, 0xd1, 0xd5, + 0x50, 0x97, 0x4e, 0xdd, 0x3f, 0x98, 0x9f, 0x4a, 0x15, 0xe2, 0x34, 0x41, 0xfb, 0x5d, 0x98, 0x5c, + 0x8c, 0x63, 0xa7, 0xb1, 0x43, 0x5c, 0x3e, 0x83, 0xe8, 0x45, 0x18, 0xf2, 0x9d, 0x16, 0x11, 0xf3, + 0x7b, 0x51, 0x0c, 0xec, 0xd0, 0xba, 0xd3, 0x22, 0x87, 0x07, 0xf3, 0xd3, 0xb7, 0x7c, 0xef, 0x9d, + 0x8e, 0x58, 0x15, 0xb4, 0x0c, 0x33, 0x6c, 0xf4, 0x3c, 0x80, 0x4b, 0xf6, 0xbc, 0x06, 0xa9, 0x39, + 0xf1, 0x8e, 0x98, 0x6f, 0x24, 0xea, 0x42, 0x45, 0x41, 0xb0, 0x86, 0x65, 0xdf, 0x83, 0xf2, 0xe2, + 0x5e, 0xe0, 0xb9, 0xb5, 0xc0, 0x8d, 0xd0, 0x2e, 0x4c, 0xb5, 0x43, 0xb2, 0x45, 0x42, 0x55, 0x34, + 0x6b, 0x5d, 0x2c, 0x5e, 0x1e, 0x7b, 0xfe, 0x72, 0xe6, 0xc7, 0x9a, 0xa8, 0x2b, 0x7e, 0x1c, 0xee, + 0x2f, 0x3d, 0x22, 0xda, 0x9b, 0x4a, 0x41, 0x71, 0x9a, 0xb2, 0xfd, 0x2f, 0x0b, 0x70, 0x66, 0xf1, + 0xdd, 0x4e, 0x48, 0x2a, 0x5e, 0xb4, 0x9b, 0x5e, 0xe1, 0xae, 0x17, 0xed, 0xae, 0x27, 0x23, 0xa0, + 0x96, 0x56, 0x45, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x85, 0x51, 0xfa, 0xfb, 0x16, 0xae, 0x8a, 0x4f, + 0x3e, 0x25, 0x90, 0xc7, 0x2a, 0x4e, 0xec, 0x54, 0x38, 0x08, 0x4b, 0x1c, 0xb4, 0x06, 0x63, 0x0d, + 0xb6, 0x21, 0xb7, 0xd7, 0x02, 0x97, 0xb0, 0xc9, 0x2c, 0x2f, 0x3d, 0x4d, 0xd1, 0x97, 0x93, 0xe2, + 0xc3, 0x83, 0xf9, 0x59, 0xde, 0x37, 0x41, 0x42, 0x83, 0x61, 0xbd, 0x3e, 0xb2, 0xd5, 0xfe, 0x1a, + 0x62, 0x94, 0x20, 0x63, 0x6f, 0x5d, 0xd6, 0xb6, 0xca, 0x30, 0xdb, 0x2a, 0xe3, 0xd9, 0xdb, 0x04, + 0x3d, 0x07, 0x43, 0xbb, 0x9e, 0xef, 0xce, 0x8e, 0x30, 0x5a, 0xe7, 0xe9, 0x9c, 0x5f, 0xf7, 0x7c, + 0xf7, 0xf0, 0x60, 0x7e, 0xc6, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x13, 0x0b, 0xe6, 0x19, + 0x6c, 0xd5, 0x6b, 0x92, 0x1a, 0x09, 0x23, 0x2f, 0x8a, 0x89, 0x1f, 0x1b, 0x03, 0xfa, 0x3c, 0x40, + 0x44, 0x1a, 0x21, 0x89, 0xb5, 0x21, 0x55, 0x0b, 0xa3, 0xae, 0x20, 0x58, 0xc3, 0xa2, 0x07, 0x42, + 0xb4, 0xe3, 0x84, 0x6c, 0x7d, 0x89, 0x81, 0x55, 0x07, 0x42, 0x5d, 0x02, 0x70, 0x82, 0x63, 0x1c, + 0x08, 0xc5, 0x7e, 0x07, 0x02, 0xfa, 0x14, 0x4c, 0x25, 0x8d, 0x45, 0x6d, 0xa7, 0x21, 0x07, 0x90, + 0x6d, 0x99, 0xba, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0xef, 0x59, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, + 0x7f, 0xab, 0xfd, 0x4b, 0x16, 0x8c, 0x2e, 0x79, 0xbe, 0xeb, 0xf9, 0xdb, 0xe8, 0xf3, 0x50, 0xa2, + 0x77, 0x93, 0xeb, 0xc4, 0x8e, 0x38, 0xf7, 0x3e, 0xa6, 0xed, 0x2d, 0x75, 0x55, 0x2c, 0xb4, 0x77, + 0xb7, 0x69, 0x41, 0xb4, 0x40, 0xb1, 0xe9, 0x6e, 0xbb, 0xb9, 0xf9, 0x36, 0x69, 0xc4, 0x6b, 0x24, + 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0xae, 0xc3, 0x48, 0xec, 0x84, 0xdb, 0x24, 0x16, + 0x07, 0x60, 0xe6, 0x41, 0xc5, 0x6b, 0x62, 0xba, 0x23, 0x89, 0xdf, 0x20, 0xc9, 0xb5, 0xb0, 0xc1, + 0xaa, 0x62, 0x41, 0xc2, 0xfe, 0x81, 0x51, 0x38, 0xb7, 0x5c, 0xaf, 0xe6, 0xac, 0xab, 0x4b, 0x30, + 0xe2, 0x86, 0xde, 0x1e, 0x09, 0xc5, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x02, + 0xe3, 0xfc, 0x42, 0xba, 0xe6, 0xf8, 0x6e, 0x53, 0x0e, 0xf1, 0x69, 0x81, 0x3d, 0x7e, 0x5b, 0x83, + 0x61, 0x03, 0xf3, 0x88, 0x8b, 
0xea, 0x52, 0x6a, 0x33, 0xe6, 0x5d, 0x76, 0x5f, 0xb4, 0x60, 0x9a, + 0x37, 0xb3, 0x18, 0xc7, 0xa1, 0xb7, 0xd9, 0x89, 0x49, 0x34, 0x3b, 0xcc, 0x4e, 0xba, 0xe5, 0xac, + 0xd1, 0xca, 0x1d, 0x81, 0x85, 0xdb, 0x29, 0x2a, 0xfc, 0x10, 0x9c, 0x15, 0xed, 0x4e, 0xa7, 0xc1, + 0xb8, 0xab, 0x59, 0xf4, 0x1d, 0x16, 0xcc, 0x35, 0x02, 0x3f, 0x0e, 0x83, 0x66, 0x93, 0x84, 0xb5, + 0xce, 0x66, 0xd3, 0x8b, 0x76, 0xf8, 0x3a, 0xc5, 0x64, 0x8b, 0x9d, 0x04, 0x39, 0x73, 0xa8, 0x90, + 0xc4, 0x1c, 0x5e, 0xb8, 0x7f, 0x30, 0x3f, 0xb7, 0x9c, 0x4b, 0x0a, 0xf7, 0x68, 0x06, 0xed, 0x02, + 0xa2, 0x57, 0x69, 0x3d, 0x76, 0xb6, 0x49, 0xd2, 0xf8, 0xe8, 0xe0, 0x8d, 0x9f, 0xbd, 0x7f, 0x30, + 0x8f, 0xd6, 0xbb, 0x48, 0xe0, 0x0c, 0xb2, 0xe8, 0x1d, 0x38, 0x4d, 0x4b, 0xbb, 0xbe, 0xb5, 0x34, + 0x78, 0x73, 0xb3, 0xf7, 0x0f, 0xe6, 0x4f, 0xaf, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0xb7, 0x5b, + 0x70, 0x2e, 0xf9, 0xfc, 0x95, 0x7b, 0x6d, 0xc7, 0x77, 0x93, 0x86, 0xcb, 0x83, 0x37, 0x4c, 0xcf, + 0xe4, 0x73, 0xcb, 0x79, 0x94, 0x70, 0x7e, 0x23, 0x73, 0xcb, 0x70, 0x26, 0x73, 0xb5, 0xa0, 0x69, + 0x28, 0xee, 0x12, 0xce, 0x05, 0x95, 0x31, 0xfd, 0x89, 0x4e, 0xc3, 0xf0, 0x9e, 0xd3, 0xec, 0x88, + 0x8d, 0x82, 0xf9, 0x9f, 0x4f, 0x14, 0x5e, 0xb1, 0xec, 0x7f, 0x55, 0x84, 0xa9, 0xe5, 0x7a, 0xf5, + 0x81, 0x76, 0xa1, 0x7e, 0x0d, 0x15, 0x7a, 0x5e, 0x43, 0xc9, 0xa5, 0x56, 0xcc, 0xbd, 0xd4, 0xfe, + 0xff, 0x8c, 0x2d, 0x34, 0xc4, 0xb6, 0xd0, 0x37, 0xe4, 0x6c, 0xa1, 0x63, 0xde, 0x38, 0x7b, 0x39, + 0xab, 0x68, 0x98, 0x4d, 0x66, 0x26, 0xc7, 0x72, 0x23, 0x68, 0x38, 0xcd, 0xf4, 0xd1, 0x77, 0xc4, + 0xa5, 0x74, 0x3c, 0xf3, 0xd8, 0x80, 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0x7a, 0x4d, 0x2f, 0xf6, 0x48, + 0x84, 0x9e, 0x84, 0xa2, 0xe3, 0xba, 0x8c, 0xdb, 0x2a, 0x2f, 0x9d, 0xb9, 0x7f, 0x30, 0x5f, 0x5c, + 0x74, 0xe9, 0xb5, 0x0f, 0x0a, 0x6b, 0x1f, 0x53, 0x0c, 0xf4, 0x51, 0x18, 0x72, 0xc3, 0xa0, 0x3d, + 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x55, 0xc2, 0xa0, 0x9d, 0x42, 0x65, 0x38, 0xf6, 0xaf, 0x16, + 0xe0, 0xb1, 0x65, 0xd2, 0xde, 0x59, 0xad, 0xe7, 0x9c, 0xdf, 0x97, 0xa1, 0xd4, 0x0a, 0x7c, 0x2f, + 0x0e, 0xc2, 0x48, 0x34, 0xcd, 0x56, 0xc4, 0x9a, 0x28, 0xc3, 0x0a, 0x8a, 0x2e, 0xc2, 0x50, 0x3b, + 0x61, 0x2a, 0xc7, 0x25, 0x43, 0xca, 0xd8, 0x49, 0x06, 0xa1, 0x18, 0x9d, 0x88, 0x84, 0x62, 0xc5, + 0x28, 0x8c, 0x5b, 0x11, 0x09, 0x31, 0x83, 0x24, 0x37, 0x33, 0xbd, 0xb3, 0xc5, 0x09, 0x9d, 0xba, + 0x99, 0x29, 0x04, 0x6b, 0x58, 0xa8, 0x06, 0xe5, 0x28, 0x35, 0xb3, 0x03, 0x6d, 0xd3, 0x09, 0x76, + 0x75, 0xab, 0x99, 0x4c, 0x88, 0x18, 0x37, 0xca, 0x48, 0xdf, 0xab, 0xfb, 0xcb, 0x05, 0x40, 0x7c, + 0x08, 0xff, 0x82, 0x0d, 0xdc, 0xad, 0xee, 0x81, 0x1b, 0x7c, 0x4b, 0x1c, 0xd7, 0xe8, 0xfd, 0xa9, + 0x05, 0x8f, 0x2d, 0x7b, 0xbe, 0x4b, 0xc2, 0x9c, 0x05, 0xf8, 0x70, 0xde, 0xb2, 0x47, 0x63, 0x1a, + 0x8c, 0x25, 0x36, 0x74, 0x0c, 0x4b, 0xcc, 0xfe, 0x23, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd, 0xc7, + 0xde, 0xea, 0xfe, 0xd8, 0x63, 0x58, 0x16, 0xf6, 0x0d, 0x98, 0x5c, 0x6e, 0x7a, 0xc4, 0x8f, 0xab, + 0xb5, 0xe5, 0xc0, 0xdf, 0xf2, 0xb6, 0xd1, 0x27, 0x60, 0x32, 0xf6, 0x5a, 0x24, 0xe8, 0xc4, 0x75, + 0xd2, 0x08, 0x7c, 0xf6, 0x92, 0xb4, 0x2e, 0x0f, 0x2f, 0xa1, 0xfb, 0x07, 0xf3, 0x93, 0x1b, 0x06, + 0x04, 0xa7, 0x30, 0xed, 0xdf, 0xa1, 0xe3, 0x17, 0xb4, 0xda, 0x81, 0x4f, 0xfc, 0x78, 0x39, 0xf0, + 0x5d, 0x2e, 0x71, 0xf8, 0x04, 0x0c, 0xc5, 0x74, 0x3c, 0xf8, 0xd8, 0x5d, 0x92, 0x1b, 0x85, 0x8e, + 0xc2, 0xe1, 0xc1, 0xfc, 0xd9, 0xee, 0x1a, 0x6c, 0x9c, 0x58, 0x1d, 0xf4, 0x0d, 0x30, 0x12, 0xc5, + 0x4e, 0xdc, 0x89, 0xc4, 0x68, 0x3e, 0x2e, 0x47, 0xb3, 0xce, 0x4a, 0x0f, 0x0f, 0xe6, 0xa7, 0x54, + 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0x8c, 0xb6, 
0x48, 0x14, 0x39, 0xdb, 0xf2, 0x36, 0x9c, + 0x12, 0x75, 0x47, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x04, 0x0c, 0x93, 0x30, 0x0c, 0x42, 0xb1, + 0x47, 0x27, 0x04, 0xe2, 0xf0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x3b, 0x0b, 0xa6, 0x54, 0x5f, + 0x79, 0x5b, 0x27, 0xf0, 0x2a, 0x78, 0x13, 0xa0, 0x21, 0x3f, 0x30, 0x62, 0xb7, 0xc7, 0xd8, 0xf3, + 0x97, 0x32, 0x2f, 0xea, 0xae, 0x61, 0x4c, 0x28, 0xab, 0xa2, 0x08, 0x6b, 0xd4, 0xec, 0x7f, 0x6a, + 0xc1, 0xa9, 0xd4, 0x17, 0xdd, 0xf0, 0xa2, 0x18, 0xbd, 0xd5, 0xf5, 0x55, 0x0b, 0x83, 0x7d, 0x15, + 0xad, 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x35, 0x18, 0xf6, 0x62, 0xd2, 0x92, + 0x1f, 0xf3, 0x44, 0xcf, 0x8f, 0xe1, 0xbd, 0x4a, 0x66, 0xa4, 0x4a, 0x6b, 0x62, 0x4e, 0xc0, 0xfe, + 0x6b, 0x45, 0x28, 0xf3, 0x65, 0xbb, 0xe6, 0xb4, 0x4f, 0x60, 0x2e, 0xaa, 0x30, 0xc4, 0xa8, 0xf3, + 0x8e, 0x3f, 0x99, 0xdd, 0x71, 0xd1, 0x9d, 0x05, 0xfa, 0xe4, 0xe7, 0xcc, 0x91, 0xba, 0x1a, 0x68, + 0x11, 0x66, 0x24, 0x90, 0x03, 0xb0, 0xe9, 0xf9, 0x4e, 0xb8, 0x4f, 0xcb, 0x66, 0x8b, 0x8c, 0xe0, + 0xb3, 0xbd, 0x09, 0x2e, 0x29, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x01, 0x60, 0x8d, 0xe8, 0xdc, 0xcb, + 0x50, 0x56, 0xc8, 0x47, 0xe1, 0x71, 0xe6, 0x3e, 0x05, 0x53, 0xa9, 0xb6, 0xfa, 0x55, 0x1f, 0xd7, + 0x59, 0xa4, 0x5f, 0x66, 0xa7, 0x80, 0xe8, 0xf5, 0x8a, 0xbf, 0x27, 0x4e, 0xd1, 0x77, 0xe1, 0x74, + 0x33, 0xe3, 0x70, 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x98, 0xf8, 0xec, 0xd3, 0x59, 0x50, 0x9c, + 0xd9, 0x06, 0xbd, 0xf6, 0x83, 0x36, 0x5d, 0xf3, 0x4e, 0x53, 0xe7, 0xa0, 0x6f, 0x8a, 0x32, 0xac, + 0xa0, 0xf4, 0x08, 0x3b, 0xad, 0x3a, 0x7f, 0x9d, 0xec, 0xd7, 0x49, 0x93, 0x34, 0xe2, 0x20, 0xfc, + 0x9a, 0x76, 0xff, 0x3c, 0x1f, 0x7d, 0x7e, 0x02, 0x8e, 0x09, 0x02, 0xc5, 0xeb, 0x64, 0x9f, 0x4f, + 0x85, 0xfe, 0x75, 0xc5, 0x9e, 0x5f, 0xf7, 0xb3, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x81, 0xad, 0xbe, + 0x64, 0x6e, 0xf5, 0xf3, 0x3d, 0x17, 0x78, 0xce, 0x26, 0xff, 0x72, 0x01, 0xce, 0x29, 0x1c, 0xca, + 0xee, 0xf3, 0x3f, 0x62, 0x55, 0x5d, 0x81, 0xb2, 0xaf, 0x04, 0x51, 0x96, 0x29, 0x01, 0x4a, 0xc4, + 0x50, 0x09, 0x0e, 0xe5, 0xda, 0xfc, 0x44, 0x5a, 0x34, 0xae, 0x4b, 0x68, 0x85, 0x34, 0x76, 0x09, + 0x8a, 0x1d, 0xcf, 0x15, 0x77, 0xc6, 0xc7, 0xe4, 0x68, 0xdf, 0xaa, 0x56, 0x0e, 0x0f, 0xe6, 0x1f, + 0xcf, 0xd3, 0x0e, 0xd0, 0xcb, 0x2a, 0x5a, 0xb8, 0x55, 0xad, 0x60, 0x5a, 0x19, 0x2d, 0xc2, 0x94, + 0x54, 0x80, 0xdc, 0xa6, 0x1c, 0x54, 0xe0, 0x8b, 0xab, 0x45, 0x89, 0x59, 0xb1, 0x09, 0xc6, 0x69, + 0x7c, 0x54, 0x81, 0xe9, 0xdd, 0xce, 0x26, 0x69, 0x92, 0x98, 0x7f, 0xf0, 0x75, 0xc2, 0x85, 0x90, + 0xe5, 0xe4, 0xb1, 0x75, 0x3d, 0x05, 0xc7, 0x5d, 0x35, 0xec, 0x3f, 0x67, 0x47, 0xbc, 0x18, 0xbd, + 0x5a, 0x18, 0xd0, 0x85, 0x45, 0xa9, 0x7f, 0x2d, 0x97, 0xf3, 0x20, 0xab, 0xe2, 0x3a, 0xd9, 0xdf, + 0x08, 0x28, 0xb3, 0x9d, 0xbd, 0x2a, 0x8c, 0x35, 0x3f, 0xd4, 0x73, 0xcd, 0xff, 0x7c, 0x01, 0xce, + 0xa8, 0x11, 0x30, 0xf8, 0xba, 0xbf, 0xe8, 0x63, 0xf0, 0x1c, 0x8c, 0xb9, 0x64, 0xcb, 0xe9, 0x34, + 0x63, 0x25, 0x11, 0x1f, 0xe6, 0x5a, 0x91, 0x4a, 0x52, 0x8c, 0x75, 0x9c, 0x23, 0x0c, 0xdb, 0xff, + 0x1a, 0x63, 0x77, 0x6b, 0xec, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xbb, 0x6b, 0x9e, 0x80, 0x61, + 0xaf, 0x45, 0x79, 0xad, 0x82, 0xc9, 0x42, 0x55, 0x69, 0x21, 0xe6, 0x30, 0xf4, 0x11, 0x18, 0x6d, + 0x04, 0xad, 0x96, 0xe3, 0xbb, 0xec, 0xca, 0x2b, 0x2f, 0x8d, 0x51, 0x76, 0x6c, 0x99, 0x17, 0x61, + 0x09, 0x43, 0x8f, 0xc1, 0x90, 0x13, 0x6e, 0x73, 0xb1, 0x44, 0x79, 0xa9, 0x44, 0x5b, 0x5a, 0x0c, + 0xb7, 0x23, 0xcc, 0x4a, 0xe9, 0xab, 0xea, 0x6e, 0x10, 0xee, 0x7a, 0xfe, 0x76, 0xc5, 0x0b, 0xc5, + 0x96, 0x50, 0x77, 0xe1, 0x1d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x85, 0xe1, 0x76, 
0x10, 0xc6, 0xd1, + 0xec, 0x08, 0x1b, 0xee, 0xc7, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0xb5, 0x20, 0x8c, 0x93, 0x0f, 0xa0, + 0xff, 0x22, 0xcc, 0xab, 0xa3, 0x1b, 0x30, 0x4a, 0xfc, 0xbd, 0xd5, 0x30, 0x68, 0xcd, 0x9e, 0xca, + 0xa7, 0xb4, 0xc2, 0x51, 0xf8, 0x32, 0x4b, 0xd8, 0x4e, 0x51, 0x8c, 0x25, 0x09, 0xf4, 0x0d, 0x50, + 0x24, 0xfe, 0xde, 0xec, 0x28, 0xa3, 0x34, 0x97, 0x43, 0xe9, 0xb6, 0x13, 0x26, 0x67, 0xfe, 0x8a, + 0xbf, 0x87, 0x69, 0x1d, 0xf4, 0x19, 0x28, 0xcb, 0x03, 0x23, 0x12, 0xf2, 0xb7, 0xcc, 0x05, 0x2b, + 0x8f, 0x19, 0x4c, 0xde, 0xe9, 0x78, 0x21, 0x69, 0x11, 0x3f, 0x8e, 0x92, 0x13, 0x52, 0x42, 0x23, + 0x9c, 0x50, 0x43, 0x9f, 0x91, 0x42, 0xdf, 0xb5, 0xa0, 0xe3, 0xc7, 0xd1, 0x6c, 0x99, 0x75, 0x2f, + 0x53, 0x1d, 0x77, 0x3b, 0xc1, 0x4b, 0x4b, 0x85, 0x79, 0x65, 0x6c, 0x90, 0x42, 0x9f, 0x85, 0x09, + 0xfe, 0x9f, 0x2b, 0xb5, 0xa2, 0xd9, 0x33, 0x8c, 0xf6, 0xc5, 0x7c, 0xda, 0x1c, 0x71, 0xe9, 0x8c, + 0x20, 0x3e, 0xa1, 0x97, 0x46, 0xd8, 0xa4, 0x86, 0x30, 0x4c, 0x34, 0xbd, 0x3d, 0xe2, 0x93, 0x28, + 0xaa, 0x85, 0xc1, 0x26, 0x99, 0x05, 0x36, 0x30, 0xe7, 0xb2, 0x95, 0x60, 0xc1, 0x26, 0x59, 0x9a, + 0xa1, 0x34, 0x6f, 0xe8, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x82, 0x49, 0xfa, 0x08, 0xf3, 0x12, 0xa2, + 0x63, 0xfd, 0x88, 0xb2, 0xa7, 0x12, 0x36, 0x2a, 0xe1, 0x14, 0x11, 0x74, 0x13, 0xc6, 0xa3, 0xd8, + 0x09, 0xe3, 0x4e, 0x9b, 0x13, 0x3d, 0xdb, 0x8f, 0x28, 0xd3, 0xa1, 0xd6, 0xb5, 0x2a, 0xd8, 0x20, + 0x80, 0x5e, 0x87, 0x72, 0xd3, 0xdb, 0x22, 0x8d, 0xfd, 0x46, 0x93, 0xcc, 0x8e, 0x33, 0x6a, 0x99, + 0x87, 0xca, 0x0d, 0x89, 0xc4, 0x5f, 0x85, 0xea, 0x2f, 0x4e, 0xaa, 0xa3, 0xdb, 0x70, 0x36, 0x26, + 0x61, 0xcb, 0xf3, 0x1d, 0x7a, 0x18, 0x88, 0xd7, 0x12, 0xd3, 0x4d, 0x4e, 0xb0, 0xdd, 0x76, 0x41, + 0xcc, 0xc6, 0xd9, 0x8d, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x83, 0xd9, 0x0c, 0x48, 0xd0, 0xf4, + 0x1a, 0xfb, 0xb3, 0xa7, 0x19, 0xe5, 0x4f, 0x0a, 0xca, 0xb3, 0x1b, 0x39, 0x78, 0x87, 0x3d, 0x60, + 0x38, 0x97, 0x3a, 0xba, 0x09, 0x53, 0xec, 0x04, 0xaa, 0x75, 0x9a, 0x4d, 0xd1, 0xe0, 0x24, 0x6b, + 0xf0, 0x23, 0xf2, 0x3e, 0xae, 0x9a, 0xe0, 0xc3, 0x83, 0x79, 0x48, 0xfe, 0xe1, 0x74, 0x6d, 0xb4, + 0xc9, 0xd4, 0x60, 0x9d, 0xd0, 0x8b, 0xf7, 0xe9, 0xb9, 0x41, 0xee, 0xc5, 0xb3, 0x53, 0x3d, 0x45, + 0x10, 0x3a, 0xaa, 0xd2, 0x95, 0xe9, 0x85, 0x38, 0x4d, 0x90, 0x1e, 0xa9, 0x51, 0xec, 0x7a, 0xfe, + 0xec, 0x34, 0x3b, 0xa9, 0xd5, 0x89, 0x54, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0x0a, 0x8c, 0xfe, 0xb8, + 0x49, 0x6f, 0xae, 0x19, 0x86, 0x98, 0xa8, 0xc0, 0x24, 0x00, 0x27, 0x38, 0x94, 0x99, 0x8c, 0xe3, + 0xfd, 0x59, 0xc4, 0x50, 0xd5, 0xc1, 0xb2, 0xb1, 0xf1, 0x19, 0x4c, 0xcb, 0xed, 0x4d, 0x98, 0x54, + 0x07, 0x21, 0x1b, 0x13, 0x34, 0x0f, 0xc3, 0x8c, 0x7d, 0x12, 0x02, 0xb3, 0x32, 0xed, 0x02, 0x63, + 0xad, 0x30, 0x2f, 0x67, 0x5d, 0xf0, 0xde, 0x25, 0x4b, 0xfb, 0x31, 0xe1, 0xcf, 0xf4, 0xa2, 0xd6, + 0x05, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0x7f, 0x39, 0x1b, 0x9a, 0x9c, 0xb6, 0x03, 0xdc, 0x2f, 0xcf, + 0x40, 0x69, 0x27, 0x88, 0x62, 0x8a, 0xcd, 0xda, 0x18, 0x4e, 0x18, 0xcf, 0x6b, 0xa2, 0x1c, 0x2b, + 0x0c, 0xf4, 0x2a, 0x4c, 0x34, 0xf4, 0x06, 0xc4, 0xe5, 0xa8, 0x8e, 0x11, 0xa3, 0x75, 0x6c, 0xe2, + 0xa2, 0x57, 0xa0, 0xc4, 0xcc, 0x3a, 0x1a, 0x41, 0x53, 0x70, 0x6d, 0xf2, 0x86, 0x2f, 0xd5, 0x44, + 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x74, 0x09, 0x46, 0x68, 0x17, 0xaa, 0x35, 0x71, 0x2d, 0x29, + 0xd9, 0xcf, 0x35, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0xab, 0x05, 0x6d, 0x94, 0xe9, 0x13, 0x97, 0xa0, + 0x1a, 0x8c, 0xde, 0x75, 0xbc, 0xd8, 0xf3, 0xb7, 0x05, 0xff, 0xf1, 0x54, 0xcf, 0x3b, 0x8a, 0x55, + 0xba, 0xc3, 0x2b, 0xf0, 0x5b, 0x54, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0x76, 0x7c, 0x9f, 0x52, + 0x2c, 
0x0c, 0x4a, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4, 0x16, 0x80, 0xdc, + 0x61, 0xc4, 0x15, 0xe6, 0x14, 0xcf, 0xf4, 0x27, 0xba, 0xa1, 0xea, 0x2c, 0x4d, 0xd2, 0x3b, 0x3a, + 0xf9, 0x8f, 0x35, 0x7a, 0x76, 0xcc, 0xf8, 0xb4, 0xee, 0xce, 0xa0, 0x6f, 0xa6, 0x4b, 0xdc, 0x09, + 0x63, 0xe2, 0x2e, 0xc6, 0x62, 0x70, 0x3e, 0x3a, 0xd8, 0x23, 0x65, 0xc3, 0x6b, 0x11, 0x7d, 0x3b, + 0x08, 0x22, 0x38, 0xa1, 0x67, 0xff, 0x62, 0x11, 0x66, 0xf3, 0xba, 0x4b, 0x17, 0x1d, 0xb9, 0xe7, + 0xc5, 0xcb, 0x94, 0xbd, 0xb2, 0xcc, 0x45, 0xb7, 0x22, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, 0x79, + 0xdb, 0xf2, 0x8d, 0x39, 0x9c, 0xcc, 0x7e, 0x9d, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x21, 0x71, 0x22, + 0x61, 0xaf, 0xa3, 0xad, 0x12, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x02, 0xac, 0xa1, 0x3e, 0x02, 0x2c, + 0x63, 0x88, 0x86, 0x8f, 0x77, 0x88, 0xd0, 0xe7, 0x00, 0xb6, 0x3c, 0xdf, 0x8b, 0x76, 0x18, 0xf5, + 0x91, 0x23, 0x53, 0x57, 0xcc, 0xd9, 0xaa, 0xa2, 0x82, 0x35, 0x8a, 0xe8, 0x25, 0x18, 0x53, 0x1b, + 0xb0, 0x5a, 0x61, 0xca, 0x4b, 0xcd, 0x18, 0x24, 0x39, 0x8d, 0x2a, 0x58, 0xc7, 0xb3, 0xdf, 0x4e, + 0xaf, 0x17, 0xb1, 0x03, 0xb4, 0xf1, 0xb5, 0x06, 0x1d, 0xdf, 0x42, 0xef, 0xf1, 0xb5, 0xbf, 0x5a, + 0x84, 0x29, 0xa3, 0xb1, 0x4e, 0x34, 0xc0, 0x99, 0x75, 0x95, 0x1e, 0xe0, 0x4e, 0x4c, 0xc4, 0xfe, + 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x90, 0xa7, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x83, 0x72, 0xd3, 0x89, + 0x98, 0x30, 0x8c, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0xe4, 0x61, 0xe2, 0x44, 0xb1, 0x76, 0x6b, 0x72, + 0xda, 0x09, 0x49, 0x7a, 0xd3, 0x50, 0xfe, 0x44, 0x1a, 0x84, 0xa9, 0x4e, 0x50, 0x26, 0x66, 0x1f, + 0x73, 0x18, 0x7a, 0x05, 0xc6, 0x43, 0xc2, 0x56, 0xc5, 0x32, 0xe5, 0xe6, 0xd8, 0x32, 0x1b, 0x4e, + 0xd8, 0x3e, 0xac, 0xc1, 0xb0, 0x81, 0x99, 0xbc, 0x0d, 0x46, 0x7a, 0xbc, 0x0d, 0x9e, 0x82, 0x51, + 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x55, 0x5e, 0x8c, 0x25, 0x3c, 0xbd, 0x60, 0x4a, 0x83, 0x2d, + 0x18, 0xfa, 0xfa, 0x10, 0x8b, 0x9a, 0x29, 0x8e, 0x4b, 0xfc, 0x94, 0x13, 0x4b, 0x1e, 0x4b, 0x98, + 0xfd, 0x51, 0x98, 0xac, 0x38, 0xa4, 0x15, 0xf8, 0x2b, 0xbe, 0xdb, 0x0e, 0x3c, 0x3f, 0x46, 0xb3, + 0x30, 0xc4, 0x2e, 0x11, 0x7e, 0x04, 0x0c, 0xd1, 0x86, 0xf0, 0x10, 0x7d, 0x10, 0xd8, 0xdb, 0x70, + 0xa6, 0x12, 0xdc, 0xf5, 0xef, 0x3a, 0xa1, 0xbb, 0x58, 0xab, 0x6a, 0xef, 0xeb, 0x75, 0xf9, 0xbe, + 0xe3, 0x76, 0x58, 0x99, 0x47, 0xaf, 0x56, 0x93, 0xb3, 0xb5, 0xab, 0x5e, 0x93, 0xe4, 0x48, 0x41, + 0xfe, 0x46, 0xc1, 0x68, 0x29, 0xc1, 0x57, 0x8a, 0x2a, 0x2b, 0x57, 0x51, 0xf5, 0x06, 0x94, 0xb6, + 0x3c, 0xd2, 0x74, 0x31, 0xd9, 0x12, 0x2b, 0xf1, 0xc9, 0x7c, 0xd3, 0x92, 0x55, 0x8a, 0x29, 0xa5, + 0x5e, 0xfc, 0x75, 0xb8, 0x2a, 0x2a, 0x63, 0x45, 0x06, 0xed, 0xc2, 0xb4, 0x7c, 0x30, 0x48, 0xa8, + 0x58, 0x97, 0x4f, 0xf5, 0x7a, 0x85, 0x98, 0xc4, 0x4f, 0xdf, 0x3f, 0x98, 0x9f, 0xc6, 0x29, 0x32, + 0xb8, 0x8b, 0x30, 0x7d, 0x0e, 0xb6, 0xe8, 0x09, 0x3c, 0xc4, 0x86, 0x9f, 0x3d, 0x07, 0xd9, 0xcb, + 0x96, 0x95, 0xda, 0x3f, 0x62, 0xc1, 0x23, 0x5d, 0x23, 0x23, 0x5e, 0xf8, 0xc7, 0x3c, 0x0b, 0xe9, + 0x17, 0x77, 0xa1, 0xff, 0x8b, 0xdb, 0xfe, 0x19, 0x0b, 0x4e, 0xaf, 0xb4, 0xda, 0xf1, 0x7e, 0xc5, + 0x33, 0xb5, 0x4a, 0x2f, 0xc3, 0x48, 0x8b, 0xb8, 0x5e, 0xa7, 0x25, 0x66, 0x6e, 0x5e, 0x9e, 0x52, + 0x6b, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0xa2, 0x1e, 0x07, 0xa1, 0xb3, 0x4d, 0x78, 0x01, 0x16, 0xe8, + 0xec, 0xac, 0xf7, 0xde, 0x25, 0x37, 0xbc, 0x96, 0x27, 0x4d, 0x85, 0x7a, 0xca, 0xec, 0x16, 0xe4, + 0x80, 0x2e, 0xbc, 0xd1, 0x71, 0xfc, 0xd8, 0x8b, 0xf7, 0x85, 0x42, 0x48, 0x12, 0xc1, 0x09, 0x3d, + 0xfb, 0x2b, 0x16, 0x4c, 0xc9, 0x75, 0xbf, 0xe8, 0xba, 0x21, 0x89, 0x22, 0x34, 0x07, 0x05, 0xaf, + 0x2d, 0x7a, 0x09, 0xa2, 0x97, 
+	// [... remaining hex bytes of the machine-generated gzipped fileDescriptorProto literal elided, up to the closing brace of the array below; this appears to be the regenerated k8s.io/api core/v1 generated.pb.go and the byte values carry no human-reviewable content ...]
+}
+
+func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
 	return dAtA[:n], nil
 }
 
-func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AWSElasticBlockStoreVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	dAtA[i] = 0xa
-	i++
-	i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
-	n68, err := m.ObjectMeta.MarshalTo(dAtA[i:])
-	if err != nil {
-		return 0, err
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
 	}
-	i += n68
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Partition))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.FSType))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n69, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n69 - return i, nil + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { +func (m *Affinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.Max) > 0 { - keysForMax := make([]string, 0, len(m.Max)) - for k := range m.Max { - keysForMax = append(keysForMax, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - for _, k := range keysForMax { - dAtA[i] = 0x12 - i++ - v := m.Max[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n70, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n70 - } - } - if len(m.Min) > 0 { - keysForMin := make([]string, 0, len(m.Min)) - for k := range m.Min { - keysForMin = append(keysForMin, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - for _, k := range keysForMin { - dAtA[i] = 0x1a - i++ - v := m.Min[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n71 - } - } - if len(m.Default) > 0 { - keysForDefault := make([]string, 0, len(m.Default)) - for k := range m.Default { - keysForDefault = append(keysForDefault, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - for _, k := range keysForDefault { - dAtA[i] = 0x22 - i++ - v := m.Default[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(dAtA[i:]) + if m.PodAntiAffinity != nil { + { + size, err := m.PodAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) if err 
!= nil { return 0, err } - i += n72 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.DefaultRequest) > 0 { - keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) - for k := range m.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - for _, k := range keysForDefaultRequest { - dAtA[i] = 0x2a - i++ - v := m.DefaultRequest[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n73, err := (&v).MarshalTo(dAtA[i:]) + if m.PodAffinity != nil { + { + size, err := m.PodAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n73 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.MaxLimitRequestRatio) > 0 { - keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) - for k := range m.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - for _, k := range keysForMaxLimitRequestRatio { - dAtA[i] = 0x32 - i++ - v := m.MaxLimitRequestRatio[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n74, err := (&v).MarshalTo(dAtA[i:]) + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n74 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttachedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n75, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n75 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + return len(dAtA) - i, nil } -func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for _, msg := range m.Limits { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *List) Marshal() (dAtA []byte, err error) { +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AvoidPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n76, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n76 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.PreferAvoidPods) > 0 { + for iNdEx := len(m.PreferAvoidPods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferAvoidPods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - return i, nil -} - -func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.Kind != nil { + i -= len(*m.Kind) + copy(dAtA[i:], *m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i-- + dAtA[i] = 
0x32 + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x22 + } + if m.CachingMode != nil { + i -= len(*m.CachingMode) + copy(dAtA[i:], *m.CachingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.DataDiskURI) + copy(dAtA[i:], m.DataDiskURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i-- + dAtA[i] = 0x12 + i -= len(m.DiskName) + copy(dAtA[i:], m.DiskName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFilePersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FSType != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) + if m.SecretNamespace != nil { + i -= len(*m.SecretNamespace) + copy(dAtA[i:], *m.SecretNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AzureFileVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) - i += copy(dAtA[i:], m.Server) - dAtA[i] = 0x12 - i++ 
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Namespace) Marshal() (dAtA []byte, err error) { +func (m *Binding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Binding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Binding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n77, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n77 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n78, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n78 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n79, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n79 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceList) Marshal() (dAtA []byte, err error) { +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n80, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ControllerExpandSecretRef != nil { + { + size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - i += n80 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - return 
i, nil -} - -func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if m.NodeStageSecretRef != nil { + { + size, err := m.NodeStageSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - return dAtA[:n], nil -} - -func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ControllerPublishSecretRef != nil { + { + size, err := m.ControllerPublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.VolumeHandle) + copy(dAtA[i:], m.VolumeHandle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { +func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x1a + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - return i, nil + return len(dAtA) - i, nil } -func (m *Node) Marshal() (dAtA []byte, err error) { +func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Node) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Capabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n81, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n81 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n82, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Drop) > 0 { + for iNdEx := len(m.Drop) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Drop[iNdEx]) + copy(dAtA[i:], m.Drop[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Drop[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - i += n82 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n83, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Add) > 0 { + for iNdEx := len(m.Add) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Add[iNdEx]) + copy(dAtA[i:], m.Add[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Add[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += n83 - return i, nil + return len(dAtA) - i, nil } -func (m *NodeAddress) Marshal() (dAtA []byte, err error) { +func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], 
m.Type) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) - i += copy(dAtA[i:], m.Address) - return i, nil + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n84, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n84 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *NodeCondition) Marshal() (dAtA []byte, err error) { +func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n85, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n85 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n86, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n86 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { +func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ConfigMap != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n87, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n87 + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { +func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, 
err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Assigned != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) - n88, err := m.Assigned.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n88 - } - if m.Active != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) - n89, err := m.Active.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n89 - } - if m.LastKnownGood != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) - n90, err := m.LastKnownGood.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n90 + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil + return len(dAtA) - i, nil } -func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { +func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n91, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n91 - return i, nil + return len(dAtA) - i, nil } -func (m *NodeList) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n92, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n92 - 
if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + return len(dAtA) - i, nil } -func (m *NodeResources) Marshal() (dAtA []byte, err error) { +func (m *ConfigMap) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if len(m.BinaryData) > 0 { + keysForBinaryData := make([]string, 0, len(m.BinaryData)) + for k := range m.BinaryData { + keysForBinaryData = append(keysForBinaryData, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- { + v := m.BinaryData[string(keysForBinaryData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForBinaryData[iNdEx]) + copy(dAtA[i:], keysForBinaryData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBinaryData[iNdEx]))) + i-- dAtA[i] = 0xa - 
i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n93, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n93 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelector) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, msg := range m.NodeSelectorTerms { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + i -= len(m.Key) + 
copy(dAtA[i:], m.Key) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.MatchFields) > 0 { - for _, msg := range m.MatchFields { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSpec) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapNodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) - i += copy(dAtA[i:], m.PodCIDR) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) - i += copy(dAtA[i:], m.DoNotUse_ExternalID) + i -= len(m.KubeletConfigKey) + copy(dAtA[i:], m.KubeletConfigKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) + i-- + dAtA[i] = 0x2a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.UID) + copy(dAtA[i:], 
m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) - i += copy(dAtA[i:], m.ProviderID) - dAtA[i] = 0x20 - i++ - if m.Unschedulable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i++ - if len(m.Taints) > 0 { - for _, msg := range m.Taints { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.ConfigSource != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n94, err := m.ConfigSource.MarshalTo(dAtA[i:]) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n94 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeStatus) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n95, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } 
- i += n95 + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if len(m.Allocatable) > 0 { - keysForAllocatable := make([]string, 0, len(m.Allocatable)) - for k := range m.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - for _, k := range keysForAllocatable { - dAtA[i] = 0x12 - i++ - v := m.Allocatable[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n96, err := (&v).MarshalTo(dAtA[i:]) + } + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Container) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n96 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } } - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n97, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n97 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n98, err := m.NodeInfo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n98 - if len(m.Images) > 0 { - for _, msg := range m.Images { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - if len(m.VolumesAttached) > 0 { - for _, msg := range m.VolumesAttached { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.Config != nil { + i-- dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) - n99, err := m.Config.MarshalTo(dAtA[i:]) + } + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n99 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if 
len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - return i, nil + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { +func (m *ContainerImage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) - i += copy(dAtA[i:], m.MachineID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) - i += copy(dAtA[i:], m.SystemUUID) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) - i += copy(dAtA[i:], m.BootID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) - i += copy(dAtA[i:], m.KernelVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) - i += copy(dAtA[i:], m.OSImage) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) - i += copy(dAtA[i:], m.ContainerRuntimeVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) - i += copy(dAtA[i:], m.KubeletVersion) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) - i += copy(dAtA[i:], m.KubeProxyVersion) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) - i += copy(dAtA[i:], m.OperatingSystem) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) - i += copy(dAtA[i:], m.Architecture) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) + i-- + dAtA[i] = 0x10 + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *ContainerPort) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + return len(dAtA) - i, nil } -func (m *ObjectReference) Marshal() (dAtA []byte, err error) { +func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + if m.Terminated != nil { + { + size, err := m.Terminated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Running != nil { + { + size, err := m.Running.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Waiting != nil { + { + size, err := m.Waiting.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *PersistentVolume) 
Marshal() (dAtA []byte, err error) { +func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n100, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n100 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n101, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n101 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n102, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n102 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateTerminated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n103, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x3a + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n103 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n104, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x32 + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n104 + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n105, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n105 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) + i-- + dAtA[i] = 0x8 + return 
len(dAtA) - i, nil } -func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateWaiting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n106, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n106 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n107, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n107 - dAtA[i] = 0x2a - i++ + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n108, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Started != nil { + i-- + if *m.Started { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 } - i += n108 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x42 + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0x3a + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) + i-- + dAtA[i] = 0x28 + i-- + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := 
m.LastTerminationState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n109, err := m.Resources.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err - } - i += n109 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n110, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n110 - } - if m.StorageClassName != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) - i += copy(dAtA[i:], *m.StorageClassName) - } - if m.VolumeMode != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) + return nil, err } - if m.DataSource != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size())) - n111, err := m.DataSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n111 } - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA 
= make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x20 } - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0x1a - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n112, err := (&v).MarshalTo(dAtA[i:]) + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n112 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) - i += copy(dAtA[i:], m.ClaimName) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 + } + if 
len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmptyDirVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n113, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n113 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.SizeLimit != nil { + { + size, err := m.SizeLimit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Medium) + copy(dAtA[i:], m.Medium) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GCEPersistentDisk != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n114, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n114 + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x22 } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n115, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n115 - } - if m.HostPath != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n116, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n116 - } - if m.Glusterfs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n117, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n117 - } - if m.NFS != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) 
- n118, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n118 - } - if m.RBD != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n119, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n119 - } - if m.ISCSI != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n120, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n120 - } - if m.Cinder != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n121, err := m.Cinder.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n121 - } - if m.CephFS != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n122, err := m.CephFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n122 - } - if m.FC != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n123, err := m.FC.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n123 - } - if m.Flocker != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n124, err := m.Flocker.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n124 - } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n125, err := m.FlexVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n125 - } - if m.AzureFile != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n126, err := m.AzureFile.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n126 - } - if m.VsphereVolume != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n127, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n127 - } - if m.Quobyte != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n128, err := m.Quobyte.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n128 - } - if m.AzureDisk != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n129, err := m.AzureDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n129 - } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n130, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n130 - } - if m.PortworxVolume != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n131, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n131 - } - if m.ScaleIO != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n132, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n132 - } - if m.Local != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n133, err := m.Local.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n133 - } - if m.StorageOS != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n134, err := 
m.StorageOS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n134 - } - if m.CSI != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n135, err := m.CSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n135 - } - return i, nil -} - -func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n136, err := (&v).MarshalTo(dAtA[i:]) + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n136 - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n137, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n137 - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.ClaimRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n138, err := m.ClaimRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n138 - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) - i += copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) - i += copy(dAtA[i:], m.StorageClassName) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.VolumeMode != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) - } - if m.NodeAffinity != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n139, err := m.NodeAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n139 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m 
*PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - return i, nil -} - -func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) - i += copy(dAtA[i:], m.PdID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - return i, nil + return len(dAtA) - i, nil } -func (m *Pod) Marshal() (dAtA []byte, err error) { +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Pod) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - i += n140 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n141, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.NotReadyAddresses) > 0 { + for iNdEx := len(m.NotReadyAddresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NotReadyAddresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n141 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n142, err := 
m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n142 - return i, nil + return len(dAtA) - i, nil } -func (m *PodAffinity) Marshal() (dAtA []byte, err error) { +func (m *Endpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Subsets) > 0 { + for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.LabelSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n143, err := m.LabelSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n143 } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) - i += copy(dAtA[i:], m.TopologyKey) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvFromSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ConfigMapRef != nil { + { + size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { +func (m *EnvVar) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x18 - i++ - if m.Stderr { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x20 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - return i, nil -} - -func (m *PodCondition) Marshal() (dAtA []byte, err error) { +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n144, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n144 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n145, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ValueFrom != nil { + { + size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n145 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { +func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.SecretKeyRef != nil { + { + size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ConfigMapKeyRef != nil { + { + size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Options) > 0 { - for _, msg := range m.Options { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainer) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) - i += copy(dAtA[i:], *m.Value) + i -= len(m.TargetContainerName) + copy(dAtA[i:], m.TargetContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetContainerName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.EphemeralContainerCommon.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainerCommon) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainerCommon) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + i-- + if m.TTY { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x18 - i++ - if m.Stderr { + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x20 - i++ - if m.TTY { + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - if len(m.Command) > 0 { - for _, s := range 
m.Command { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - return i, nil -} - -func (m *PodList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n146, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n146 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - return i, nil -} - -func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - dAtA[i] = 0x10 - i++ - if m.Follow { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - i++ - dAtA[i] = 0x18 - i++ - if m.Previous { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - i++ - if m.SinceSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } } - if m.SinceTime != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n147, err := m.SinceTime.MarshalTo(dAtA[i:]) + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n147 - } - dAtA[i] = 0x30 - i++ - if m.Timestamps { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.TailLines != nil { - 
dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) - } - if m.LimitBytes != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil -} - -func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - return dAtA[:n], nil -} - -func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l if len(m.Ports) > 0 { - for _, num := range m.Ports { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - return i, nil -} - -func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - return dAtA[:n], nil -} - -func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + return len(dAtA) - i, nil } -func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) - i += copy(dAtA[i:], m.ConditionType) - return i, nil -} - -func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityContext) MarshalTo(dAtA []byte) 
(int, error) { - var i int +func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.SELinuxOptions != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n148, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n148 - } - if m.RunAsUser != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x18 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.SupplementalGroups) > 0 { - for _, num := range m.SupplementalGroups { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) - } - } - if m.FSGroup != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) - } - if m.RunAsGroup != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - } - if len(m.Sysctls) > 0 { - for _, msg := range m.Sysctls { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil -} - -func (m *PodSignature) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PodController != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n149, err := m.PodController.MarshalTo(dAtA[i:]) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n149 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSpec) Marshal() (dAtA []byte, err error) { +func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x7a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x72 + if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } + i-- + dAtA[i] = 0x6a } - if len(m.Containers) > 0 { - for _, msg := range m.Containers { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x62 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) - i += copy(dAtA[i:], m.RestartPolicy) - if m.TerminationGracePeriodSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) - i += copy(dAtA[i:], m.DNSPolicy) - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for _, k := range keysForNodeSelector { - dAtA[i] = 0x3a - i++ - v := m.NodeSelector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i += copy(dAtA[i:], m.ServiceAccountName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) - i += copy(dAtA[i:], m.DeprecatedServiceAccount) + i-- dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - dAtA[i] = 0x58 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x60 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecurityContext != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n150, err := m.SecurityContext.MarshalTo(dAtA[i:]) + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x4a + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x40 + { + size, err := m.LastTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n150 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) - i += copy(dAtA[i:], m.Subdomain) - if m.Affinity != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n151, err := m.Affinity.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x3a + { + size, err := m.FirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n151 - } - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i += copy(dAtA[i:], m.SchedulerName) - if len(m.InitContainers) > 0 { - for _, msg := range m.InitContainers { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Tolerations) > 0 { - for _, msg := range m.Tolerations { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.HostAliases) > 0 { - for _, msg := range m.HostAliases { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) - i += copy(dAtA[i:], m.PriorityClassName) - if m.Priority != nil { - dAtA[i] = 0xc8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.DNSConfig != nil { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) - n152, err := m.DNSConfig.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x32 + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n152 - } - if m.ShareProcessNamespace != nil { - dAtA[i] = 0xd8 - i++ - dAtA[i] = 0x1 - i++ - if *m.ShareProcessNamespace { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ReadinessGates) > 0 { - for _, msg := range m.ReadinessGates { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.InvolvedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RuntimeClassName != nil { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) - i += copy(dAtA[i:], *m.RuntimeClassName) - } - if m.EnableServiceLinks != nil { - dAtA[i] = 0xf0 - i++ - dAtA[i] = 0x1 - i++ - if *m.EnableServiceLinks 
{ - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatus) Marshal() (dAtA []byte, err error) { +func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) - i += copy(dAtA[i:], m.PodIP) - if m.StartTime != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n153, err := m.StartTime.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n153 - } - if len(m.ContainerStatuses) > 0 { - for _, msg := range m.ContainerStatuses { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) - i += copy(dAtA[i:], m.QOSClass) - if len(m.InitContainerStatuses) > 0 { - for _, msg := range m.InitContainerStatuses { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) - i += copy(dAtA[i:], m.NominatedNodeName) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { +func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSeries) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n154, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n154 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n155, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n155 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *PodTemplate) Marshal() (dAtA []byte, err error) { +func (m *EventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n156, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n156 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n157, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n157 - return i, nil + i -= len(m.Component) + copy(dAtA[i:], m.Component) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { +func (m *ExecAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n158, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n158 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + 
return len(dAtA) - i, nil } -func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n159 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n160, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n160 - return i, nil -} - -func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ + if len(m.WWIDs) > 0 { + for iNdEx := len(m.WWIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.WWIDs[iNdEx]) + copy(dAtA[i:], m.WWIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WWIDs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + if m.Lun != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) + i-- + dAtA[i] = 0x10 + } + if len(m.TargetWWNs) > 0 { + for iNdEx := len(m.TargetWWNs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetWWNs[iNdEx]) + copy(dAtA[i:], m.TargetWWNs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetWWNs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Preconditions) Marshal() (dAtA []byte, err error) { +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n161, err := m.PodSignature.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } } - i += n161 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n162, err := m.EvictionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n162 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { +func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlockerVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i -= len(m.DatasetUUID) + copy(dAtA[i:], m.DatasetUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n163, err := m.Preference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n163 - return i, nil + i -= len(m.DatasetName) + copy(dAtA[i:], m.DatasetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Probe) Marshal() (dAtA []byte, err error) { +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Probe) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n164, err := m.Handler.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n164 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + i-- dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PDName) + copy(dAtA[i:], m.PDName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRepoVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, msg := range m.Sources { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i-- + dAtA[i] = 0x1a + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x12 + i -= len(m.Repository) + copy(dAtA[i:], m.Repository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) - i += copy(dAtA[i:], m.Registry) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) - i += copy(dAtA[i:], m.Volume) - dAtA[i] = 0x18 - i++ + if m.EndpointsNamespace != nil { + i -= len(*m.EndpointsNamespace) + copy(dAtA[i:], *m.EndpointsNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) - i += copy(dAtA[i:], m.Tenant) - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if 
len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n165, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n165 - } - dAtA[i] = 0x40 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPGetAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.HTTPHeaders) > 0 { + for iNdEx := len(m.HTTPHeaders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HTTPHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x2a } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n166, err := m.SecretRef.MarshalTo(dAtA[i:]) + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n166 - } - dAtA[i] = 0x40 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n167 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) - i += copy(dAtA[i:], m.Range) - if m.Data != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationController) Marshal() (dAtA []byte, err error) { +func (m *Handler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n168, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n168 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n169, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n169 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n170, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n170 - return i, nil + return len(dAtA) - i, nil } -func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { +func (m *HostAlias) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n171, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Hostnames) > 0 { + for iNdEx := len(m.Hostnames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hostnames[iNdEx]) + copy(dAtA[i:], m.Hostnames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostnames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - i += n171 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPathVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n172, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n172 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } 
return dAtA[:n], nil } -func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if m.Template != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n173, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n173 } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 + } + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } } - return i, nil + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyToPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i += copy(dAtA[i:], m.ContainerName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n174, err := m.Divisor.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x18 } - i += 
n174 - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n175, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n175 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n176, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n176 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n177, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n177 - return i, nil -} - -func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n178, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreStop != nil { + { + size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n178 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.PostStart != nil { + { + size, err := m.PostStart.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { +func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { - dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != 
nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n179, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n179 - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ScopeSelector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) - n180, err := m.ScopeSelector.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n180 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- { + v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMaxLimitRequestRatio[iNdEx]) + copy(dAtA[i:], keysForMaxLimitRequestRatio[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMaxLimitRequestRatio[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for k := range m.DefaultRequest { + keysForDefaultRequest = 
append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- { + v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefaultRequest[iNdEx]) + copy(dAtA[i:], keysForDefaultRequest[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefaultRequest[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- { + v := m.Default[ResourceName(keysForDefault[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n181, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(keysForDefault[iNdEx]) + copy(dAtA[i:], keysForDefault[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefault[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- { + v := m.Min[ResourceName(keysForMin[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n181 + i-- + dAtA[i] = 0x12 + i -= len(keysForMin[iNdEx]) + copy(dAtA[i:], keysForMin[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMin[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - if len(m.Used) > 0 { - keysForUsed := make([]string, 0, len(m.Used)) - for k := range m.Used { - keysForUsed = append(keysForUsed, string(k)) + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - for _, k := range keysForUsed { + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- { + v := m.Max[ResourceName(keysForMax[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.Used[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForMax[iNdEx]) + copy(dAtA[i:], keysForMax[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(keysForMax[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n182, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n182 } } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Limits) > 0 { - keysForLimits := make([]string, 0, len(m.Limits)) - for k := range m.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - for _, k := range keysForLimits { - dAtA[i] = 0xa - i++ - v := m.Limits[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n183, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n183 + i-- + dAtA[i] = 0x12 } } - if len(m.Requests) > 0 { - keysForRequests := make([]string, 0, len(m.Requests)) - for k := range m.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - for _, k := range keysForRequests { - dAtA[i] = 0x12 - i++ - v := m.Requests[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n184, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n184 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) - return i, nil + if len(m.Limits) > 0 { + for iNdEx := len(m.Limits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *List) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n185, err := m.SecretRef.MarshalTo(dAtA[i:]) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n185 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m 
*ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n186, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n186 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) - i += copy(dAtA[i:], m.ScopeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + return len(dAtA) - i, nil } -func (m *Secret) Marshal() (dAtA []byte, err error) { +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Secret) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n187, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n187 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.StringData) > 0 { - keysForStringData := make([]string, 0, len(m.StringData)) - for k := range m.StringData { - keysForStringData = append(keysForStringData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - for _, k := range keysForStringData { - dAtA[i] = 0x22 - i++ - v := m.StringData[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n188 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Server) + copy(dAtA[i:], m.Server) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { +func (m *Namespace) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n189, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n189 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretList) Marshal() (dAtA []byte, err error) { +func (m *NamespaceCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretList) 
MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n190, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n190 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretProjection) Marshal() (dAtA []byte, err error) { +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n191, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n191 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretReference) Marshal() (dAtA []byte, err error) { +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecurityContext) Marshal() (dAtA []byte, err error) { +func (m *Node) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Capabilities != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n192, err := m.Capabilities.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n192 - } - if m.Privileged != nil { - dAtA[i] = 0x10 - i++ - if *m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.SELinuxOptions != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.SELinuxOptions.Size())) - n193, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n193 - } - if m.RunAsUser != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x28 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.ReadOnlyRootFilesystem != nil { - dAtA[i] = 0x30 - i++ - if *m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x38 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ - } - if m.RunAsGroup != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) - i += copy(dAtA[i:], *m.ProcMount) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SerializedReference) Marshal() (dAtA []byte, err error) { +func (m *NodeAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n194, err := m.Reference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n194 - return i, nil -} - -func (m *Service) Marshal() (dAtA []byte, err error) { +func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Service) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n195, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n195 + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n196, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n196 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n197, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n197 - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { +func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil 
} -func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n198, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n198 - if len(m.Secrets) > 0 { - for _, msg := range m.Secrets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n - } - } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0x20 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { +func (m *NodeCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n199, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n199 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x22 + { + size, err := m.LastHeartbeatTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + 
copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) - i += copy(dAtA[i:], m.Audience) - if m.ExpirationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceList) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n200, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n200 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil -} - -func (m *ServicePort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n201, err := m.TargetPort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n201 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) - return i, nil + return len(dAtA) - i, nil } -func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + if m.LastKnownGood != nil { + { + size, err := m.LastKnownGood.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n - } - } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) - i += copy(dAtA[i:], m.ClusterIP) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Active != nil { + { + size, err := m.Active.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) - i += copy(dAtA[i:], m.SessionAffinity) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) - i += copy(dAtA[i:], m.LoadBalancerIP) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Assigned != nil { + { + size, err := m.Assigned.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) - i += copy(dAtA[i:], m.ExternalName) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) - i += copy(dAtA[i:], m.ExternalTrafficPolicy) - dAtA[i] = 0x60 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) - dAtA[i] = 0x68 - i++ - if m.PublishNotReadyAddresses { - 
dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SessionAffinityConfig != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n202, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n202 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { +func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n203, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n203 - return i, nil -} - -func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { +func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ClientIP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n204, err := m.ClientIP.MarshalTo(dAtA[i:]) + { + size, err := m.KubeletEndpoint.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n204 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n205, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n205 - } - return i, nil -} - -func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeList) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n206, err := m.SecretRef.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n206 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Sysctl) Marshal() (dAtA []byte, err error) { +func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil + return len(dAtA) - i, nil } -func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { +func (m *NodeResources) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n207, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) 
+ copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - i += n207 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil + return len(dAtA) - i, nil } -func (m *Taint) Marshal() (dAtA []byte, err error) { +func (m *NodeSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Taint) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TimeAdded != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n208, err := m.TimeAdded.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.NodeSelectorTerms) > 0 { + for iNdEx := len(m.NodeSelectorTerms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NodeSelectorTerms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n208 } - return i, nil + return len(dAtA) - i, nil } -func (m *Toleration) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TolerationSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - return i, nil + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if len(m.Values) > 0 { - for _, s := range m.Values { + if len(m.MatchFields) > 0 { + for iNdEx := len(m.MatchFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, msg := range m.MatchLabelExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if len(m.PodCIDRs) > 0 { + for iNdEx := len(m.PodCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PodCIDRs[iNdEx]) + copy(dAtA[i:], m.PodCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.ConfigSource != nil { + { + size, err := m.ConfigSource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } } - return i, nil + i-- + if m.Unschedulable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.ProviderID) + copy(dAtA[i:], m.ProviderID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i-- + 
dAtA[i] = 0x1a + i -= len(m.DoNotUse_ExternalID) + copy(dAtA[i:], m.DoNotUse_ExternalID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) + i-- + dAtA[i] = 0x12 + i -= len(m.PodCIDR) + copy(dAtA[i:], m.PodCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.APIGroup != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) - i += copy(dAtA[i:], *m.APIGroup) + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + if len(m.VolumesAttached) > 0 { + for iNdEx := len(m.VolumesAttached) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumesAttached[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.VolumesInUse) > 0 { + for iNdEx := len(m.VolumesInUse) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumesInUse[iNdEx]) + copy(dAtA[i:], m.VolumesInUse[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumesInUse[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.DaemonEndpoints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + if len(m.Allocatable) > 0 { 
+ keysForAllocatable := make([]string, 0, len(m.Allocatable)) + for k := range m.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- { + v := m.Allocatable[ResourceName(keysForAllocatable[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatable[iNdEx]) + copy(dAtA[i:], keysForAllocatable[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Volume) Marshal() (dAtA []byte, err error) { +func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Volume) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i -= len(m.Architecture) + copy(dAtA[i:], m.Architecture) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i-- + dAtA[i] = 0x52 + i -= len(m.OperatingSystem) + copy(dAtA[i:], m.OperatingSystem) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) + i-- + dAtA[i] = 0x4a + i -= len(m.KubeProxyVersion) + copy(dAtA[i:], m.KubeProxyVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) + i-- + dAtA[i] = 0x42 + i -= len(m.KubeletVersion) + copy(dAtA[i:], m.KubeletVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) + i-- + dAtA[i] = 0x3a + i -= len(m.ContainerRuntimeVersion) + copy(dAtA[i:], m.ContainerRuntimeVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.OSImage) + copy(dAtA[i:], m.OSImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) + i-- + dAtA[i] = 0x2a + i -= len(m.KernelVersion) + copy(dAtA[i:], m.KernelVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.BootID) + copy(dAtA[i:], m.BootID) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.BootID))) + i-- + dAtA[i] = 0x1a + i -= len(m.SystemUUID) + copy(dAtA[i:], m.SystemUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n209, err := m.VolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n209 - return i, nil + i -= len(m.MachineID) + copy(dAtA[i:], m.MachineID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { +func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += copy(dAtA[i:], m.DevicePath) - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeMount) Marshal() (dAtA []byte, err error) { +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- + dAtA[i] = 0x3a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) - i += copy(dAtA[i:], m.MountPath) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) - i += copy(dAtA[i:], m.SubPath) - if m.MountPropagation != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) - i += copy(dAtA[i:], 
*m.MountPropagation) - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) - i += copy(dAtA[i:], m.SubPathExpr) - return i, nil + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Required != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) - n210, err := m.Required.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n210 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Secret != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n211, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n211 - } - if m.DownwardAPI != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n212, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n212 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ConfigMap != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n213, err := m.ConfigMap.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n213 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ServiceAccountToken != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) - n214, 
err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n214 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HostPath != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n215, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n215 - } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n216, err := m.EmptyDir.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n216 - } - if m.GCEPersistentDisk != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n217, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n217 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n218, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n218 - } - if m.GitRepo != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n219, err := m.GitRepo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n219 - } - if m.Secret != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n220, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n220 - } - if m.NFS != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n221, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n221 - } - if m.ISCSI != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n222, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n222 - } - if m.Glusterfs != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n223, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n223 - } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n224, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n224 - } - if m.RBD != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n225, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n225 - } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n226, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i -= 
len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n226 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Cinder != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n227, err := m.Cinder.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n227 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.CephFS != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n228, err := m.CephFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n228 + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.Flocker != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n229, err := m.Flocker.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n229 } - if m.DownwardAPI != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n230, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n230 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.FC != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n231, err := m.FC.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n231 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.AzureFile != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n232, err := m.AzureFile.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimSpec) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DataSource != nil { + { + size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n232 + i-- + dAtA[i] = 0x3a } - if m.ConfigMap != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n233, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n233 + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + dAtA[i] = 0x32 } - if m.VsphereVolume != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n234, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n234 + if m.StorageClassName != nil { + i -= len(*m.StorageClassName) + copy(dAtA[i:], *m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i-- + dAtA[i] = 0x2a } - if m.Quobyte != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n235, err := m.Quobyte.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n235 + i-- + dAtA[i] = 0x22 } - if m.AzureDisk != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n236, err := m.AzureDisk.MarshalTo(dAtA[i:]) + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n236 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n237, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x12 + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0xa } - i += n237 } - if m.PortworxVolume != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n238, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n238 + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.ScaleIO != nil { - dAtA[i] = 0xca - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n239, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n239 } - if m.Projected != nil { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n240, err := m.Projected.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - i += n240 - } - if m.StorageOS != nil { - dAtA[i] = 0xda - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n241, err := m.StorageOS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } - i += n241 } - if m.CSI != nil { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n242, err := m.CSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i += n242 } - return i, nil + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.ClaimName) + copy(dAtA[i:], m.ClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) - i += copy(dAtA[i:], m.VolumePath) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.StoragePolicyName))) - i += copy(dAtA[i:], m.StoragePolicyName) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) - i += copy(dAtA[i:], m.StoragePolicyID) - return i, nil + return len(dAtA) - i, nil } -func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n243, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n243 - return i, nil + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return offset + 1 + return dAtA[:n], nil } -func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n + +func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Affinity) Size() (n int) { +func (m *PersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.PodAffinity != nil { - l = m.PodAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - if m.PodAntiAffinity != nil { - l = m.PodAntiAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Local != nil { + { + size, err := m.Local.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - return n + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if m.FlexVolume != nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Glusterfs != nil { + { + size, err := 
m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *AttachedVolume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *AvoidPods) Size() (n int) { - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, e := range m.PreferAvoidPods { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AzureDiskVolumeSource) Size() (n int) { +func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.DiskName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DataDiskURI) - n += 1 + l + sovGenerated(uint64(l)) - if m.CachingMode != nil { - l = len(*m.CachingMode) - n += 1 + l + sovGenerated(uint64(l)) + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + dAtA[i] = 0x42 } - if m.ReadOnly != nil { - n += 2 + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x3a + } } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.StorageClassName) + copy(dAtA[i:], m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i-- + dAtA[i] = 0x32 + i -= len(m.PersistentVolumeReclaimPolicy) + copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i-- + dAtA[i] = 0x2a + if m.ClaimRef != nil { + { + size, err := m.ClaimRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - return n + if len(m.AccessModes) > 0 { + for iNdEx := 
len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.PersistentVolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *AzureFilePersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretNamespace != nil { - l = len(*m.SecretNamespace) - n += 1 + l + sovGenerated(uint64(l)) +func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *AzureFileVolumeSource) Size() (n int) { +func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Binding) Size() (n int) { +func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PdID) + 
copy(dAtA[i:], m.PdID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CSIPersistentVolumeSource) Size() (n int) { +func (m *Pod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeHandle) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ControllerPublishSecretRef != nil { - l = m.ControllerPublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NodeStageSecretRef != nil { - l = m.NodeStageSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *CSIVolumeSource) Size() (n int) { +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - if m.ReadOnly != nil { - n += 2 - } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Capabilities) Size() (n int) { +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x1a + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *CephFSPersistentVolumeSource) Size() (n int) { +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - n += 2 - return n + return len(dAtA) - i, nil } -func (m 
*CephFSVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - return n + return dAtA[:n], nil } -func (m *CinderPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CinderVolumeSource) Size() (n int) { +func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ClientIPConfig) Size() (n int) { - var l int - _ = l - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) +func (m *PodCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ComponentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ComponentStatus) Size() (n int) { +func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ComponentStatusList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMap) Size() (n int) { +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.BinaryData) > 0 { - for k, v := range m.BinaryData { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Searches) > 0 { + for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Searches[iNdEx]) + copy(dAtA[i:], m.Searches[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Searches[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return n + if len(m.Nameservers) > 0 { + for iNdEx := len(m.Nameservers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Nameservers[iNdEx]) + copy(dAtA[i:], m.Nameservers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Nameservers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ConfigMapEnvSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapKeySelector) Size() (n int) { +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfigOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 } - return n + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ConfigMapList) Size() (n int) { +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - return n + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ConfigMapNodeConfigSource) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletConfigKey) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ConfigMapProjection) Size() (n int) { +func (m *PodIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapVolumeSource) Size() (n int) { +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Optional != nil { - n += 2 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Container) Size() (n int) { +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 } - l = len(m.WorkingDir) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 } - l = len(m.TerminationMessagePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - n += 3 - n += 3 - n += 3 - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - 
n += 2 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = len(m.TerminationMessagePolicy) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.VolumeDevices) > 0 { - for _, e := range m.VolumeDevices { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ContainerImage) Size() (n int) { +func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPortForwardOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x8 } } - n += 1 + sovGenerated(uint64(m.SizeBytes)) - return n + return len(dAtA) - i, nil } -func (m *ContainerPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HostPort)) - n += 1 + sovGenerated(uint64(m.ContainerPort)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ContainerState) Size() (n int) { +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Waiting != nil { - l = m.Waiting.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Running != nil { - l = m.Running.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Terminated != nil { - l = m.Terminated.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ContainerStateRunning) Size() (n int) { - var l int - _ = l - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerStateTerminated) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ExitCode)) - n += 1 + sovGenerated(uint64(m.Signal)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ContainerStateWaiting) Size() (n int) { - var l int - _ = l - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + 
sovGenerated(uint64(l)) - return n +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ContainerStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.State.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTerminationState.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 1 + sovGenerated(uint64(m.RestartCount)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImageID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DaemonEndpoint) Size() (n int) { +func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.Port)) - return n + i -= len(m.ConditionType) + copy(dAtA[i:], m.ConditionType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DownwardAPIProjection) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *DownwardAPIVolumeFile) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) - } - return n +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DownwardAPIVolumeSource) Size() (n int) { +func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) - } - return n -} - -func (m *EmptyDirVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Medium) - n += 1 + l + sovGenerated(uint64(l)) - if m.SizeLimit != nil { - l = m.SizeLimit.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Sysctls) > 0 { + for iNdEx := len(m.Sysctls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sysctls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - return n -} - -func (m *EndpointAddress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetRef != nil { - l = m.TargetRef.Size() - n += 1 + l + 
sovGenerated(uint64(l)) + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x30 } - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - if m.NodeName != nil { - l = len(*m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) + if m.FSGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + i-- + dAtA[i] = 0x28 } - return n -} - -func (m *EndpointPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EndpointSubset) Size() (n int) { - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x20 } } - if len(m.NotReadyAddresses) > 0 { - for _, e := range m.NotReadyAddresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x10 } - return n -} - -func (m *Endpoints) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subsets) > 0 { - for _, e := range m.Subsets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *EndpointsList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *EnvFromSource) Size() (n int) { - var l int - _ = l - l = len(m.Prefix) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConfigMapRef != nil { - l = m.ConfigMapRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EnvVar) Size() (n int) { +func (m *PodSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.ValueFrom != nil { - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PodController != nil { + { + size, err := m.PodController.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - 
i, nil } -func (m *EnvVarSource) Size() (n int) { - var l int - _ = l - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretKeyRef != nil { - l = m.SecretKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.InvolvedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FirstTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Count)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = m.EventTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Series != nil { - l = m.Series.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Action) - n += 1 + l + sovGenerated(uint64(l)) - if m.Related != nil { - l = m.Related.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ReportingController) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ReportingInstance) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventList) Size() (n int) { +func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } } - return n -} - -func (m *EventSeries) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Count)) - l = m.LastObservedTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.State) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EventSource) Size() (n int) { - var l int - _ = l - l = len(m.Component) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExecAction) Size() (n int) { - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologySpreadConstraints) > 0 { + for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a } } - return n -} - -func (m 
*FCVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Overhead) > 0 { + keysForOverhead := make([]string, 0, len(m.Overhead)) + for k := range m.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- { + v := m.Overhead[ResourceName(keysForOverhead[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForOverhead[iNdEx]) + copy(dAtA[i:], keysForOverhead[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOverhead[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa } - if m.Lun != nil { - n += 1 + sovGenerated(uint64(*m.Lun)) + if m.EnableServiceLinks != nil { + i-- + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 } - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.RuntimeClassName != nil { + i -= len(*m.RuntimeClassName) + copy(dAtA[i:], *m.RuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.ReadinessGates) > 0 { + for iNdEx := len(m.ReadinessGates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReadinessGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } } - return n -} - -func (m *FlexPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ShareProcessNamespace != nil { + i-- + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.DNSConfig != nil { + { + size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } - return n -} - -func (m *FlexVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 } - n += 2 - if 
len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + if len(m.HostAliases) > 0 { + for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - return n -} - -func (m *FlockerVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.DatasetName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DatasetUUID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GCEPersistentDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.PDName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n -} - -func (m *GitRepoVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Repository) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Revision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Directory) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x52 + i -= len(m.DeprecatedServiceAccount) + copy(dAtA[i:], m.DeprecatedServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i-- + dAtA[i] = 0x4a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x42 + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DNSPolicy) + copy(dAtA[i:], m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i-- + dAtA[i] = 0x32 + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x28 + } + if m.TerminationGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x1a + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *GlusterfsPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.EndpointsNamespace != nil { - l = len(*m.EndpointsNamespace) - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *GlusterfsVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - 
return n +func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HTTPGetAction) Size() (n int) { +func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Scheme) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.HTTPHeaders) > 0 { - for _, e := range m.HTTPHeaders { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainerStatuses) > 0 { + for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } } - return n + if len(m.PodIPs) > 0 { + for iNdEx := len(m.PodIPs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PodIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + i -= len(m.NominatedNodeName) + copy(dAtA[i:], m.NominatedNodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) + i-- + dAtA[i] = 0x5a + if len(m.InitContainerStatuses) > 0 { + for iNdEx := len(m.InitContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + i -= len(m.QOSClass) + copy(dAtA[i:], m.QOSClass) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) + i-- + dAtA[i] = 0x4a + if len(m.ContainerStatuses) > 0 { + for iNdEx := len(m.ContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.StartTime != nil { + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.PodIP) + copy(dAtA[i:], m.PodIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i-- + dAtA[i] = 0x32 + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HTTPHeader) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 
1 + l + sovGenerated(uint64(l)) - return n +func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Handler) Size() (n int) { +func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodStatusResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.HTTPGet != nil { - l = m.HTTPGet.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.TCPSocket != nil { - l = m.TCPSocket.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *HostAlias) Size() (n int) { +func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HostPathVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ISCSIPersistentVolumeSource) Size() (n int) { +func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ISCSIVolumeSource) Size() (n int) { +func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *KeyToPath) Size() (n int) { +func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortworxVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Lifecycle) Size() (n int) { +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.PostStart != nil { - l = m.PostStart.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa } - if m.PreStop != nil { - l = m.PreStop.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LimitRange) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LimitRangeItem) Size() (n int) { +func (m *PreferAvoidPodsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Max) > 0 { - for k, v := range m.Max { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.EvictionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Min) > 0 { - for k, v := range m.Min { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x12 + { + size, err := m.PodSignature.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Default) > 0 { - for k, v := range m.Default { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.DefaultRequest) > 0 { - for k, v := range m.DefaultRequest { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.MaxLimitRequestRatio) > 0 { - for k, v := range m.MaxLimitRequestRatio { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - 
} +func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LimitRangeSpec) Size() (n int) { +func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreferredSchedulingTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Limits) > 0 { - for _, e := range m.Limits { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Preference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *List) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Probe) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LoadBalancerIngress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LoadBalancerStatus) Size() (n int) { +func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) + i-- + dAtA[i] = 0x10 + { + size, err := m.Handler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LocalObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *LocalVolumeSource) Size() (n int) { +func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectedVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, 
uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 } - return n + if len(m.Sources) > 0 { + for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NFSVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Server) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Namespace) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamespaceList) Size() (n int) { +func (m *QuobyteVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x32 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x2a + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x18 + i -= len(m.Volume) + copy(dAtA[i:], m.Volume) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) + i-- + dAtA[i] = 0x12 + i -= len(m.Registry) + copy(dAtA[i:], m.Registry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceSpec) Size() (n int) { - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NamespaceStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Node) Size() (n int) { +func (m *RBDPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- + dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NodeAddress) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Address) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *NodeAffinity) Size() (n int) { +func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RBDVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - return n + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- + dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NodeCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastHeartbeatTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *NodeConfigSource) Size() (n int) { - var l int - _ = l - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeConfigStatus) Size() (n int) { +func (m *RangeAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Assigned != nil { - l = m.Assigned.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a } - if m.Active != nil { - l = m.Active.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Range) + copy(dAtA[i:], m.Range) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.LastKnownGood != nil { - l = m.LastKnownGood.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicationController) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) Size() (n int) { - var l int - _ = l - l = m.KubeletEndpoint.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeList) Size() (n int) { +func (m *ReplicationController) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *NodeResources) Size() (n int) { - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n +func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeSelector) Size() (n int) { +func (m *ReplicationControllerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, e := range m.NodeSelectorTerms { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NodeSelectorTerm) Size() (n int) { +func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if len(m.MatchFields) > 0 { - for _, e := range m.MatchFields { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSpec) Size() (n int) { +func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.PodCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DoNotUse_ExternalID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ProviderID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Taints) > 0 { - for _, e := range m.Taints { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.ConfigSource != nil { - l = m.ConfigSource.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - return n + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *NodeStatus) Size() (n int) { +func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - if len(m.Allocatable) > 0 { - for k, v := range m.Allocatable { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) 
+ i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Divisor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = m.DaemonEndpoints.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.NodeInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.VolumesAttached) > 0 { - for _, e := range m.VolumesAttached { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSystemInfo) Size() (n int) { - var l int - _ = l - l = len(m.MachineID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SystemUUID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BootID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KernelVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OSImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerRuntimeVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletVersion) 
- n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeProxyVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OperatingSystem) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Architecture) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ObjectFieldSelector) Size() (n int) { - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ObjectReference) Size() (n int) { +func (m *ResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolume) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeClaim) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PersistentVolumeClaimCondition) Size() (n int) { +func (m *ResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.ScopeSelector != nil { + { + size, err := m.ScopeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeClaimSpec) Size() (n int) { +func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Used) > 0 { + keysForUsed := make([]string, 0, len(m.Used)) + for k := range m.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + for iNdEx := len(keysForUsed) - 1; iNdEx >= 0; iNdEx-- { + v := m.Used[ResourceName(keysForUsed[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsed[iNdEx]) + copy(dAtA[i:], keysForUsed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StorageClassName != nil { - l = len(*m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - if m.DataSource != nil { - l = m.DataSource.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) Size() (n int) { +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) } - } - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[ResourceName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { + v := m.Limits[ResourceName(keysForLimits[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLimits[iNdEx]) + copy(dAtA[i:], keysForLimits[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.ClaimName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil } -func (m *PersistentVolumeList) Size() (n int) { +func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0x22 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeSource) Size() (n int) { +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleIOPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + 
l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Local != nil { - l = m.Local.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) + i -= len(m.System) + copy(dAtA[i:], m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeSpec) Size() (n int) { +func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleIOVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.PersistentVolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.ClaimRef != nil { - l = m.ClaimRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = len(m.PersistentVolumeReclaimPolicy) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.System) + copy(dAtA[i:], m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { +func (m *ScopeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.PdID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Pod) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodAffinity) Size() (n int) { +func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopedResourceSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range 
m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.ScopeName) + copy(dAtA[i:], m.ScopeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodAffinityTerm) Size() (n int) { +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.LabelSelector != nil { - l = m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + keysForStringData := make([]string, 0, len(m.StringData)) + for k := range m.StringData { + keysForStringData = append(keysForStringData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + for iNdEx := len(keysForStringData) - 1; iNdEx >= 0; iNdEx-- { + v := m.StringData[string(keysForStringData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForStringData[iNdEx]) + copy(dAtA[i:], keysForStringData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStringData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - l = len(m.TopologyKey) - n += 1 + l + sovGenerated(uint64(l)) - return n + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAntiAffinity) Size() (n int) { +func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if 
len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAttachOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodDNSConfig) Size() (n int) { +func (m *SecretKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - if len(m.Options) > 0 { - for _, e := range m.Options { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDNSConfigOption) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Value != nil { - l = len(*m.Value) - n += 1 + l + sovGenerated(uint64(l)) +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodExecOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m 
*PodList) Size() (n int) { +func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodLogOptions) Size() (n int) { +func (m *SecretProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - if m.SinceSeconds != nil { - n += 1 + sovGenerated(uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - l = m.SinceTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - n += 2 - if m.TailLines != nil { - n += 1 + sovGenerated(uint64(*m.TailLines)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.LimitBytes != nil { - n += 1 + sovGenerated(uint64(*m.LimitBytes)) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodPortForwardOptions) Size() (n int) { - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - n += 1 + sovGenerated(uint64(e)) - } +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodReadinessGate) Size() (n int) { +func (m *SecretReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.ConditionType) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m 
*PodSecurityContext) Size() (n int) { +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if len(m.SupplementalGroups) > 0 { - for _, e := range m.SupplementalGroups { - n += 1 + sovGenerated(uint64(e)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if m.FSGroup != nil { - n += 1 + sovGenerated(uint64(*m.FSGroup)) - } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 } - if len(m.Sysctls) > 0 { - for _, e := range m.Sysctls { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSignature) Size() (n int) { - var l int - _ = l - if m.PodController != nil { - l = m.PodController.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodSpec) Size() (n int) { +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - l = len(m.RestartPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + if m.ProcMount != nil { + i -= len(*m.ProcMount) + copy(dAtA[i:], *m.ProcMount) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) + i-- + dAtA[i] = 0x4a } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x40 } - l = len(m.DNSPolicy) - n += 1 + l + 
sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x38 } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnlyRootFilesystem != nil { + i-- + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 } - l = len(m.Hostname) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.Subdomain) - n += 2 + l + sovGenerated(uint64(l)) - if m.Affinity != nil { - l = m.Affinity.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x20 } - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.AutomountServiceAccountToken != nil { - n += 3 - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Privileged != nil { + i-- + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - if len(m.HostAliases) > 0 { - for _, e := range m.HostAliases { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Capabilities != nil { + { + size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - l = len(m.PriorityClassName) - n += 2 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 2 + sovGenerated(uint64(*m.Priority)) + return len(dAtA) - i, nil +} + +func (m *SerializedReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializedReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ShareProcessNamespace != nil { - n += 3 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil 
+} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.ReadinessGates) > 0 { - for _, e := range m.ReadinessGates { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RuntimeClassName != nil { - l = len(*m.RuntimeClassName) - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.EnableServiceLinks != nil { - n += 3 + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatus) Size() (n int) { +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if len(m.ContainerStatuses) > 0 { - for _, e := range m.ContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = len(m.QOSClass) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.InitContainerStatuses) > 0 { - for _, e := range m.InitContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.NominatedNodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatusResult) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodTemplateList) Size() (n int) { +func (m *ServiceAccountList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PortworxVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Preconditions) Size() (n int) { +func (m *ServiceAccountTokenProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.UID != nil { - l = len(*m.UID) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 } - return n -} - -func (m *PreferAvoidPodsEntry) Size() (n int) { - var l int - _ = l - l = m.PodSignature.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.EvictionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - 
i, nil } -func (m *PreferredSchedulingTerm) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.Preference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Probe) Size() (n int) { - var l int - _ = l - l = m.Handler.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) - n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - n += 1 + sovGenerated(uint64(m.SuccessThreshold)) - n += 1 + sovGenerated(uint64(m.FailureThreshold)) - return n +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ProjectedVolumeSource) Size() (n int) { +func (m *ServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, e := range m.Sources { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Registry) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Volume) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Tenant) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServicePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) Size() (n int) { +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) + i-- + dAtA[i] = 0x28 + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + 
dAtA[i] = 0x18 + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - return n + return dAtA[:n], nil } -func (m *RangeAllocation) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Range) - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicationController) Size() (n int) { +func (m *ServiceProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReplicationControllerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicationControllerSpec) Size() (n int) { +func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) + if m.IPFamily != nil { + i -= len(*m.IPFamily) + copy(dAtA[i:], *m.IPFamily) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) + i-- + dAtA[i] = 0x7a + } + if m.SessionAffinityConfig != nil { + { + size, err := 
m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.PublishNotReadyAddresses { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + i-- + dAtA[i] = 0x60 + i -= len(m.ExternalTrafficPolicy) + copy(dAtA[i:], m.ExternalTrafficPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i-- + dAtA[i] = 0x5a + i -= len(m.ExternalName) + copy(dAtA[i:], m.ExternalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) + i-- + dAtA[i] = 0x52 + if len(m.LoadBalancerSourceRanges) > 0 { + for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LoadBalancerSourceRanges[iNdEx]) + copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + i -= len(m.LoadBalancerIP) + copy(dAtA[i:], m.LoadBalancerIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i-- + dAtA[i] = 0x42 + i -= len(m.SessionAffinity) + copy(dAtA[i:], m.SessionAffinity) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i-- + dAtA[i] = 0x3a + if len(m.ExternalIPs) > 0 { + for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExternalIPs[iNdEx]) + copy(dAtA[i:], m.ExternalIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x22 + i -= len(m.ClusterIP) + copy(dAtA[i:], m.ClusterIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Size() (n int) { +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceFieldSelector) Size() (n int) { - var l int - _ = l - l = len(m.ContainerName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Divisor.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ResourceQuota) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceQuotaList) Size() (n int) { +func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ClientIP != nil { + { + size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ResourceQuotaSpec) Size() (n int) { +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if 
m.ScopeSelector != nil { - l = m.ScopeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResourceQuotaStatus) Size() (n int) { +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Used) > 0 { - for k, v := range m.Used { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceRequirements) Size() (n int) { - var l int - _ = l - if len(m.Limits) > 0 { - for k, v := range m.Limits { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Requests) > 0 { - for k, v := range m.Requests { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (m *Sysctl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SELinuxOptions) Size() (n int) { - var l int - _ = l - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Role) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Level) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ScaleIOPersistentVolumeSource) Size() (n int) { +func (m *Sysctl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScaleIOVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + return dAtA[:n], nil } -func (m *ScopeSelector) Size() (n int) { +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPSocketAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScopedResourceSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.ScopeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Taint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Secret) Size() (n int) { +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - 
if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.StringData) > 0 { - for k, v := range m.StringData { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - return n + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x1a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretEnvSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SecretKeySelector) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } - return n +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretList) Size() (n int) { +func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 } - return n + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretProjection) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 +func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m 
*SecretReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretVolumeSource) Size() (n int) { +func (m *TopologySelectorLabelRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) - } - if m.Optional != nil { - n += 2 - } - return n + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecurityContext) Size() (n int) { - var l int - _ = l - if m.Capabilities != nil { - l = m.Capabilities.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Privileged != nil { - n += 2 - } - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if m.ReadOnlyRootFilesystem != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 2 - } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - l = len(*m.ProcMount) - n += 1 + l + sovGenerated(uint64(l)) +func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n -} - -func (m *SerializedReference) Size() (n int) { - var l int - _ = l - l = m.Reference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *Service) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceAccount) Size() (n int) { +func (m *TopologySelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Secrets) > 0 { - for _, e := range m.Secrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchLabelExpressions) > 0 { + for iNdEx := len(m.MatchLabelExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchLabelExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.AutomountServiceAccountToken != nil { - n += 2 - } - return n + 
return len(dAtA) - i, nil } -func (m *ServiceAccountList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *TopologySpreadConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ServiceAccountTokenProjection) Size() (n int) { - var l int - _ = l - l = len(m.Audience) - n += 1 + l + sovGenerated(uint64(l)) - if m.ExpirationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySpreadConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceList) Size() (n int) { +func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - return n + i -= len(m.WhenUnsatisfiable) + copy(dAtA[i:], m.WhenUnsatisfiable) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenUnsatisfiable))) + i-- + dAtA[i] = 0x1a + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSkew)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ServicePort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = m.TargetPort.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.NodePort)) - return n +func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ServiceProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceSpec) Size() (n int) { +func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ClusterIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - l = len(s) - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - l = len(m.SessionAffinity) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.LoadBalancerIP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ExternalName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExternalTrafficPolicy) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) - n += 2 - if m.SessionAffinityConfig != nil { - l = m.SessionAffinityConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ServiceStatus) Size() (n int) { - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SessionAffinityConfig) Size() (n int) { - var l int - _ = l - if m.ClientIP != nil { - l = m.ClientIP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StorageOSPersistentVolumeSource) Size() (n int) { +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageOSVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Sysctl) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TCPSocketAction) Size() (n int) { +func (m *VolumeDevice) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Taint) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeAdded != nil { - l = m.TimeAdded.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Toleration) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TolerationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) - } - return n +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TopologySelectorLabelRequirement) Size() (n int) { +func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.SubPathExpr) + copy(dAtA[i:], m.SubPathExpr) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) + i-- + dAtA[i] = 0x32 + if m.MountPropagation != nil { + i -= len(*m.MountPropagation) + copy(dAtA[i:], *m.MountPropagation) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i-- + dAtA[i] = 0x2a } - return n + i -= len(m.SubPath) + copy(dAtA[i:], m.SubPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i-- + dAtA[i] = 0x22 + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x1a + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *TopologySelectorTerm) Size() (n int) { - var l int - _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, e := range m.MatchLabelExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *TypedLocalObjectReference) Size() (n int) { +func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.APIGroup != 
nil { - l = len(*m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) + if m.Required != nil { + { + size, err := m.Required.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n + return len(dAtA) - i, nil } -func (m *Volume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.VolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *VolumeDevice) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VolumeMount) Size() (n int) { +func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.MountPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - if m.MountPropagation != nil { - l = len(*m.MountPropagation) - n += 1 + l + sovGenerated(uint64(l)) + if m.ServiceAccountToken != nil { + { + size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - l = len(m.SubPathExpr) - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VolumeNodeAffinity) Size() (n int) { - var l int - _ = l - if m.Required != nil { - l = m.Required.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *VolumeProjection) Size() (n int) { +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Projected != nil { + { + size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } - if m.ServiceAccountToken != nil { - l = m.ServiceAccountToken.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } - return n -} - -func (m *VolumeSource) Size() (n int) { - var l int - _ = l - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if m.EmptyDir != nil { - l = m.EmptyDir.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - if m.GitRepo != nil { - l = m.GitRepo.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } - if m.ISCSI != nil { - l = m.ISCSI.Size() - 
n += 1 + l + sovGenerated(uint64(l)) + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PersistentVolumeClaim != nil { + { + size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - if m.FC != nil { - l = m.FC.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - if m.ConfigMap != nil { - l = 
m.ConfigMap.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.GitRepo != nil { + { + size, err := m.GitRepo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.EmptyDir != nil { + { + size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.StoragePolicyID) + copy(dAtA[i:], m.StoragePolicyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i-- + dAtA[i] = 0x22 + i -= len(m.StoragePolicyName) + copy(dAtA[i:], m.StoragePolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i-- + dAtA[i] = 0x1a + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumePath) + copy(dAtA[i:], m.VolumePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.Projected != nil { - l = m.Projected.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WeightedPodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := 
m.PodAffinityTerm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RunAsUserName != nil { + i -= len(*m.RunAsUserName) + copy(dAtA[i:], *m.RunAsUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RunAsUserName))) + i-- + dAtA[i] = 0x1a } - return n + if m.GMSACredentialSpec != nil { + i -= len(*m.GMSACredentialSpec) + copy(dAtA[i:], *m.GMSACredentialSpec) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) + i-- + dAtA[i] = 0x12 + } + if m.GMSACredentialSpecName != nil { + i -= len(*m.GMSACredentialSpecName) + copy(dAtA[i:], *m.GMSACredentialSpecName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.VolumePath) + l = len(m.VolumeID) n += 1 + l + sovGenerated(uint64(l)) l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyID) - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 return n } -func (m *WeightedPodAffinityTerm) Size() (n int) { +func (m *Affinity) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.PodAffinityTerm.Size() + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AttachedVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) n += 1 + l + sovGenerated(uint64(l)) return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *AvoidPods) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 
63)))) -} -func (this *AWSElasticBlockStoreVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *AzureDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Affinity) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Affinity{`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, - `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, - `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, - `}`, - }, "") - return s + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *AttachedVolume) String() string { - if this == nil { - return "nil" + +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AttachedVolume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *AvoidPods) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&AvoidPods{`, - `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *AzureDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *AzureFileVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureDiskVolumeSource{`, - `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, - `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, - `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `Kind:` + valueToStringGenerated(this.Kind) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *AzureFilePersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Binding) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + 
fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *AzureFileVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *CSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureFileVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Binding) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&Binding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if m.ControllerPublishSecretRef != nil { + l = m.ControllerPublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) + if m.NodeStageSecretRef != nil { + l = m.NodeStageSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIVolumeSource) String() string { - if this == nil { - return "nil" + if m.ControllerExpandSecretRef != nil { + l = m.ControllerExpandSecretRef.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) + return n +} + +func (m *CSIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnly != nil { + n += 2 } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Capabilities) String() string { - if this == nil { - return "nil" + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Capabilities{`, - `Add:` + fmt.Sprintf("%v", this.Add) + `,`, - `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSVolumeSource) String() string { - if this == nil { - return "nil" + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&CephFSVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *CinderPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Capabilities) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&CinderPersistentVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s 
-} -func (this *CinderVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&CinderVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClientIPConfig) String() string { - if this == nil { - return "nil" + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ClientIPConfig{`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `}`, - }, "") - return s + return n } -func (this *ComponentCondition) String() string { - if this == nil { - return "nil" + +func (m *CephFSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ComponentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ComponentStatus{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatusList) String() string { - if this == nil { - return "nil" + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ComponentStatusList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + n += 2 + return n } -func (this *ConfigMap) String() string { - if this == nil { - return "nil" + +func (m *CephFSVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string]string{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) 
+ if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForData += "}" - keysForBinaryData := make([]string, 0, len(this.BinaryData)) - for k := range this.BinaryData { - keysForBinaryData = append(keysForBinaryData, k) + n += 2 + return n +} + +func (m *CinderPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - mapStringForBinaryData := "map[string][]byte{" - for _, k := range keysForBinaryData { - mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForBinaryData += "}" - s := strings.Join([]string{`&ConfigMap{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `BinaryData:` + mapStringForBinaryData + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapEnvSource) String() string { - if this == nil { - return "nil" + +func (m *CinderVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapKeySelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ConfigMapKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapList) String() string { - if this == nil { - return "nil" + +func (m *ClientIPConfig) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapNodeConfigSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapProjection) String() string { - if this == nil { 
- return "nil" + +func (m *ComponentCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ConfigMapVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ComponentStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapVolumeSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *Container) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Container{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, - `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, - `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, - `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, - `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, - `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, - `TerminationMessagePolicy:` + fmt.Sprintf("%v", 
this.TerminationMessagePolicy) + `,`, - `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ContainerImage) String() string { - if this == nil { - return "nil" + +func (m *ComponentStatusList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerImage{`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ContainerPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, - `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerState) String() string { - if this == nil { - return "nil" + +func (m *ConfigMap) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerState{`, - `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, - `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, - `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateRunning) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ContainerStateRunning{`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateTerminated) String() string { - if this == nil { - return "nil" + if len(m.BinaryData) > 0 { + for k, v := range m.BinaryData { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ContainerStateTerminated{`, - `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s + return n } -func (this 
*ContainerStateWaiting) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapEnvSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerStateWaiting{`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&ContainerStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, - `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s + return n } -func (this *DaemonEndpoint) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapKeySelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DaemonEndpoint{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIProjection) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&DownwardAPIProjection{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *DownwardAPIVolumeFile) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DownwardAPIVolumeFile{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&DownwardAPIVolumeSource{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s + return n } -func (this *EmptyDirVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapNodeConfigSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EmptyDirVolumeSource{`, - `Medium:` + fmt.Sprintf("%v", this.Medium) + 
`,`, - `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletConfigKey) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *EndpointAddress) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapProjection) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EndpointAddress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointPort) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EndpointPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointSubset) String() string { - if this == nil { - return "nil" + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&EndpointSubset{`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Endpoints) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Endpoints{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointsList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EndpointsList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvFromSource) String() string { - if this == nil { - return "nil" + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - s := 
strings.Join([]string{`&EnvFromSource{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvVar) String() string { - if this == nil { - return "nil" + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&EnvVar{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *EnvVarSource) String() string { - if this == nil { - return "nil" + +func (m *Container) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EnvVarSource{`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, - `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Event) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, - `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, - `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, - `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, - `}`, - }, "") - return s -} -func (this *EventList) String() string { - if this == nil { - return "nil" + if len(m.Args) > 0 { + for _, s := 
range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventSeries) String() string { - if this == nil { - return "nil" + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventSeries{`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `}`, - }, "") - return s -} -func (this *EventSource) String() string { - if this == nil { - return "nil" + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventSource{`, - `Component:` + fmt.Sprintf("%v", this.Component) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *ExecAction) String() string { - if this == nil { - return "nil" + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ExecAction{`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s -} -func (this *FCVolumeSource) String() string { - if this == nil { - return "nil" + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&FCVolumeSource{`, - `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, - `Lun:` + valueToStringGenerated(this.Lun) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, - `}`, - }, "") - return s -} -func (this *FlexPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", 
this.ReadOnly) + `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s -} -func (this *FlexVolumeSource) String() string { - if this == nil { - return "nil" + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + if m.StartupProbe != nil { + l = m.StartupProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s + return n } -func (this *FlockerVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerImage) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&FlockerVolumeSource{`, - `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, - `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.SizeBytes)) + return n } -func (this *GCEPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerPort) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, - `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *GitRepoVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerState) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&GitRepoVolumeSource{`, - `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, - `}`, - }, "") - return s -} -func (this *GlusterfsPersistentVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, - `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) 
+ `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, - `}`, - }, "") - return s -} -func (this *GlusterfsVolumeSource) String() string { - if this == nil { - return "nil" + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&GlusterfsVolumeSource{`, - `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPGetAction) String() string { - if this == nil { - return "nil" + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&HTTPGetAction{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, - `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *HTTPHeader) String() string { - if this == nil { - return "nil" + +func (m *ContainerStateRunning) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HTTPHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Handler) String() string { - if this == nil { - return "nil" + +func (m *ContainerStateTerminated) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Handler{`, - `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, - `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, - `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HostAlias) String() string { - if this == nil { - return "nil" + +func (m *ContainerStateWaiting) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HostAlias{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HostPathVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HostPathVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Type:` + valueToStringGenerated(this.Type) + `,`, - `}`, - }, "") - return s -} -func (this 
*ISCSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTerminationState.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + if m.Started != nil { + n += 2 } - s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s + return n } -func (this *ISCSIVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *DaemonEndpoint) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ISCSIVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + return n } -func (this *KeyToPath) String() string { - if this == nil { - return "nil" + +func (m *DownwardAPIProjection) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&KeyToPath{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s -} -func (this *Lifecycle) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Lifecycle{`, - `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, - `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *LimitRange) String() string { - if this == nil { - return "nil" + +func (m *DownwardAPIVolumeFile) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LimitRange{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LimitRangeItem) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForMax := make([]string, 0, len(this.Max)) - for k := range this.Max { - keysForMax = append(keysForMax, string(k)) + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - mapStringForMax := "ResourceList{" - for _, k := range keysForMax { - mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) } - mapStringForMax += "}" - keysForMin := make([]string, 0, len(this.Min)) - for k := range this.Min { - keysForMin = append(keysForMin, string(k)) + return n +} + +func (m *DownwardAPIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - mapStringForMin := "ResourceList{" - for _, k := range keysForMin { - mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForMin += "}" - keysForDefault := make([]string, 0, len(this.Default)) - for k := range this.Default { - keysForDefault = append(keysForDefault, string(k)) + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - mapStringForDefault := "ResourceList{" - for _, k := range keysForDefault { - mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + return n +} + +func (m *EmptyDirVolumeSource) Size() (n int) { + if m == nil { + return 0 } - mapStringForDefault += "}" - keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) - for k := range this.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + var l int + _ = l + l = len(m.Medium) + n += 1 + l + sovGenerated(uint64(l)) + if m.SizeLimit != nil { + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - mapStringForDefaultRequest := "ResourceList{" - for _, k := range keysForDefaultRequest { - mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + return n +} + +func (m *EndpointAddress) Size() (n int) { + if m == nil { + return 0 } - mapStringForDefaultRequest += "}" - keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) - for k := range this.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - mapStringForMaxLimitRequestRatio := "ResourceList{" - for _, k := range keysForMaxLimitRequestRatio { - mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, 
this.MaxLimitRequestRatio[ResourceName(k)]) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMaxLimitRequestRatio += "}" - s := strings.Join([]string{`&LimitRangeItem{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Max:` + mapStringForMax + `,`, - `Min:` + mapStringForMin + `,`, - `Default:` + mapStringForDefault + `,`, - `DefaultRequest:` + mapStringForDefaultRequest + `,`, - `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, - `}`, - }, "") - return s + return n } -func (this *LimitRangeList) String() string { - if this == nil { - return "nil" + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LimitRangeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *LimitRangeSpec) String() string { - if this == nil { - return "nil" + +func (m *EndpointSubset) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LimitRangeSpec{`, - `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *List) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&List{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerIngress) String() string { - if this == nil { - return "nil" + if len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LoadBalancerIngress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerStatus) String() string { - if this == nil { - return "nil" + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LoadBalancerStatus{`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *LocalObjectReference) String() string { - if this == nil { - return "nil" + +func (m *Endpoints) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LocalObjectReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *LocalVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LocalVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `}`, - }, "") - return s + return n } -func (this *NFSVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *EndpointsList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NFSVolumeSource{`, - `Server:` + fmt.Sprintf("%v", this.Server) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Namespace) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Namespace{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NamespaceList) String() string { - if this == nil { - return "nil" + +func (m *EnvFromSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamespaceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NamespaceSpec{`, - `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, - `}`, - }, "") - return s + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *NamespaceStatus) String() string { - if this == nil { - return "nil" + +func (m *EnvVar) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamespaceStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *Node) String() string { - if this == nil { - return "nil" + +func (m *EnvVarSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Node{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, - `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeAddress) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeAddress{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Address:` + fmt.Sprintf("%v", this.Address) + `,`, - `}`, - }, "") - return s -} -func (this *NodeAffinity) String() string { - if this == nil { - return "nil" + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeCondition) String() string { - if this == nil { - return "nil" + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *NodeConfigSource) String() string { - if this == nil { - return "nil" + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeConfigSource{`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeConfigStatus) String() string { - if this == nil { - return "nil" + +func (m *EphemeralContainer) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeConfigStatus{`, - `Assigned:` + strings.Replace(fmt.Sprintf("%v", this.Assigned), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Active:` + strings.Replace(fmt.Sprintf("%v", this.Active), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `LastKnownGood:` + strings.Replace(fmt.Sprintf("%v", this.LastKnownGood), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.EphemeralContainerCommon.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TargetContainerName) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NodeDaemonEndpoints) String() string { - if this == nil { - return "nil" + +func (m *EphemeralContainerCommon) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeDaemonEndpoints{`, - `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", 
"DaemonEndpoint", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeProxyOptions) String() string { - if this == nil { - return "nil" + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *NodeResources) String() string { - if this == nil { - return "nil" + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForCapacity += "}" - s := strings.Join([]string{`&NodeResources{`, - `Capacity:` + mapStringForCapacity + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelector) String() string { - if this == nil { - return "nil" + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeSelector{`, - `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelectorRequirement) String() string { - if this == nil { - return "nil" + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeSelectorRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelectorTerm) String() string { - if this == nil { - return "nil" + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeSelectorTerm{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, - `MatchFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchFields), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 
1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSpec) String() string { - if this == nil { - return "nil" + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeSpec{`, - `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, - `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, - `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, - `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, - `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, - `ConfigSource:` + strings.Replace(fmt.Sprintf("%v", this.ConfigSource), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeStatus) String() string { - if this == nil { - return "nil" + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + if m.StartupProbe != nil { + l = m.StartupProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) } - mapStringForCapacity += "}" - keysForAllocatable := make([]string, 0, len(this.Allocatable)) - for k := range this.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) + return n +} + +func (m *EphemeralContainers) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - mapStringForAllocatable := "ResourceList{" - for _, k := range keysForAllocatable { - mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForAllocatable += "}" - s := strings.Join([]string{`&NodeStatus{`, - `Capacity:` + mapStringForCapacity + `,`, - `Allocatable:` + mapStringForAllocatable + `,`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, - `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, - `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, - `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, - `VolumesInUse:` + fmt.Sprintf("%v", 
this.VolumesInUse) + `,`, - `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, - `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeSystemInfo) String() string { - if this == nil { - return "nil" + +func (m *Event) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeSystemInfo{`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, - `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, - `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, - `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, - `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, - `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, - `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, - `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, - `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectFieldSelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ObjectFieldSelector{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectReference) String() string { - if this == nil { - return "nil" + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PersistentVolume) String() string { - if this == nil { - return "nil" + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolume{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, 
``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaim) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PersistentVolumeClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PersistentVolumeClaimCondition) String() string { - if this == nil { - return "nil" + +func (m *EventSeries) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PersistentVolumeClaimList) String() string { - if this == nil { - return "nil" + +func (m *EventSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PersistentVolumeClaimSpec) String() string { - if this == nil { - return "nil" + +func (m *ExecAction) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `DataSource:` + 
strings.Replace(fmt.Sprintf("%v", this.DataSource), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *PersistentVolumeClaimStatus) String() string { - if this == nil { - return "nil" + +func (m *FCVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Capacity:` + mapStringForCapacity + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimVolumeSource) String() string { - if this == nil { - return "nil" + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.WWIDs) > 0 { + for _, s := range m.WWIDs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, - `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *PersistentVolumeList) String() string { - if this == nil { - return "nil" + +func (m *FlexPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } -func (this *PersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *FlexVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeSource{`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), 
"AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, - `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeSpec) String() string { - if this == nil { - return "nil" - } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 
+ len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeSpec{`, - `Capacity:` + mapStringForCapacity + `,`, - `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, - `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, - `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PersistentVolumeStatus) String() string { - if this == nil { - return "nil" + +func (m *FlockerVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DatasetUUID) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PhotonPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, - `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n } -func (this *Pod) String() string { - if this == nil { - return "nil" + +func (m *GitRepoVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Pod{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodAffinity) String() string { - if this == nil { - return "nil" + +func (m *GlusterfsPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - 
`PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodAffinityTerm) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.EndpointsNamespace != nil { + l = len(*m.EndpointsNamespace) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodAffinityTerm{`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, - `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodAntiAffinity) String() string { - if this == nil { - return "nil" + +func (m *GlusterfsVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodAntiAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *PodAttachOptions) String() string { - if this == nil { - return "nil" + +func (m *HTTPGetAction) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodAttachOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `}`, - }, "") - return s -} -func (this *PodCondition) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodDNSConfig) String() string { - if this == nil { - return "nil" + +func (m *HTTPHeader) Size() (n int) { + if m == nil { + return 
0 } - s := strings.Join([]string{`&PodDNSConfig{`, - `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, - `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, - `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodDNSConfigOption) String() string { - if this == nil { - return "nil" + +func (m *Handler) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodDNSConfigOption{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *PodExecOptions) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodExecOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s -} -func (this *PodList) String() string { - if this == nil { - return "nil" + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodLogOptions) String() string { - if this == nil { - return "nil" + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodLogOptions{`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, - `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, - `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, - `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, - `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, - `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodPortForwardOptions) String() string { - if this == nil { - return "nil" + +func (m *HostAlias) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodPortForwardOptions{`, - `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, - `}`, - }, "") - return s -} -func (this *PodProxyOptions) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodReadinessGate) String() string { - if this == nil { - return "nil" + +func (m *HostPathVolumeSource) Size() (n 
int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodReadinessGate{`, - `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityContext) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodSecurityContext{`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, - `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodSignature) String() string { - if this == nil { - return "nil" + +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodSignature{`, - `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&PodSpec{`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, - `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, - `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `HostNetwork:` + 
fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "PodSecurityContext", 1) + `,`, - `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, - `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, - `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, - `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, - `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodStatus) String() string { - if this == nil { - return "nil" + +func (m *ISCSIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, - `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, - `}`, - }, "") - return s -} -func (this *PodStatusResult) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 
+ if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodStatusResult{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplate) String() string { - if this == nil { - return "nil" + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplateList) String() string { - if this == nil { - return "nil" + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodTemplateSpec) String() string { - if this == nil { - return "nil" + +func (m *KeyToPath) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PortworxVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) } - s := strings.Join([]string{`&PortworxVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *Preconditions) String() string { - if this == nil { - return "nil" + +func (m *Lifecycle) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Preconditions{`, - `UID:` + valueToStringGenerated(this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *PreferAvoidPodsEntry) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PreferAvoidPodsEntry{`, - `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, - `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", 
this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PreferredSchedulingTerm) String() string { - if this == nil { - return "nil" + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PreferredSchedulingTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Probe) String() string { - if this == nil { - return "nil" + +func (m *LimitRange) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Probe{`, - `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, - `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, - `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, - `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, - `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, - `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ProjectedVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeItem) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ProjectedVolumeSource{`, - `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s -} -func (this *QuobyteVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&QuobyteVolumeSource{`, - `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, - `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, - `}`, - }, "") - return s -} -func (this *RBDPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&RBDPersistentVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} 
-func (this *RBDVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&RBDVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RangeAllocation) String() string { - if this == nil { - return "nil" + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&RangeAllocation{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Range:` + fmt.Sprintf("%v", this.Range) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationController) String() string { - if this == nil { - return "nil" + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ReplicationController{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ReplicationControllerCondition) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationControllerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ReplicationControllerList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ReplicationControllerSpec) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) + +func (m *LimitRangeSpec) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForSelector += "}" - s := strings.Join([]string{`&ReplicationControllerSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s + return n } -func (this *ReplicationControllerStatus) String() string { - if this == nil { - return "nil" + +func (m *List) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationControllerStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFieldSelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ResourceFieldSelector{`, - `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceQuota) String() string { - if this == nil { - return "nil" + +func (m *LoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ResourceQuota{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.IP) + n += 1 
+ l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ResourceQuotaList) String() string { - if this == nil { - return "nil" + +func (m *LoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ResourceQuotaList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ResourceQuotaSpec) String() string { - if this == nil { - return "nil" + +func (m *LocalObjectReference) Size() (n int) { + if m == nil { + return 0 } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LocalVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForHard += "}" - s := strings.Join([]string{`&ResourceQuotaSpec{`, - `Hard:` + mapStringForHard + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceQuotaStatus) String() string { - if this == nil { - return "nil" + +func (m *NFSVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceCondition) Size() (n int) { + if m == nil { + return 0 } - mapStringForHard += "}" - keysForUsed := make([]string, 0, len(this.Used)) - for k := range this.Used { - keysForUsed = append(keysForUsed, string(k)) + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + if m == nil { + return 0 } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - mapStringForUsed := "ResourceList{" - for _, k := range keysForUsed { - mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForUsed += "}" - s := strings.Join([]string{`&ResourceQuotaStatus{`, - `Hard:` + mapStringForHard + `,`, - `Used:` + mapStringForUsed + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceRequirements) String() string { - if this == nil { - return "nil" - } - keysForLimits := make([]string, 0, len(this.Limits)) - for k := range this.Limits { - keysForLimits = append(keysForLimits, string(k)) + +func (m *NamespaceSpec) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - mapStringForLimits := "ResourceList{" - for _, k := range keysForLimits { - mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForLimits += "}" - keysForRequests := make([]string, 0, len(this.Requests)) - for k := range this.Requests { - keysForRequests = append(keysForRequests, string(k)) + return n +} + +func (m *NamespaceStatus) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - mapStringForRequests := "ResourceList{" - for _, k := range keysForRequests { - mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForRequests += "}" - s := strings.Join([]string{`&ResourceRequirements{`, - `Limits:` + mapStringForLimits + `,`, - `Requests:` + mapStringForRequests + `,`, - `}`, - }, "") - return s + return n } -func (this *SELinuxOptions) String() string { - if this == nil { - return "nil" + +func (m *Node) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SELinuxOptions{`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Role:` + fmt.Sprintf("%v", this.Role) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ScaleIOPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeAddress) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - 
`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ScaleIOVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeAffinity) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScaleIOVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScopeSelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ScopeSelector{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ScopedResourceSelectorRequirement) String() string { - if this == nil { - return "nil" + +func (m *NodeCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, - `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Secret) String() string { - if this == nil { - return "nil" + +func (m *NodeConfigSource) Size() (n int) { + if m == nil { + return 0 } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) + var l int + _ = l + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string][]byte{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + return n +} + +func (m *NodeConfigStatus) Size() (n int) { + if m == nil { + return 0 } - mapStringForData += "}" - keysForStringData := make([]string, 0, len(this.StringData)) - for k := range 
this.StringData { - keysForStringData = append(keysForStringData, k) + var l int + _ = l + if m.Assigned != nil { + l = m.Assigned.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - mapStringForStringData := "map[string]string{" - for _, k := range keysForStringData { - mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + if m.Active != nil { + l = m.Active.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForStringData += "}" - s := strings.Join([]string{`&Secret{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `StringData:` + mapStringForStringData + `,`, - `}`, - }, "") - return s -} -func (this *SecretEnvSource) String() string { - if this == nil { - return "nil" + if m.LastKnownGood != nil { + l = m.LastKnownGood.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&SecretEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SecretKeySelector) String() string { - if this == nil { - return "nil" + +func (m *NodeDaemonEndpoints) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SecretList) String() string { - if this == nil { - return "nil" + +func (m *NodeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SecretProjection) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SecretProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + return n } -func (this *SecretReference) String() string { - if this == nil { - return "nil" + +func (m *NodeProxyOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - 
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SecretVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeResources) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecurityContext) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&SecurityContext{`, - `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, - `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, - `}`, - }, "") - return s + return n } -func (this *SerializedReference) String() string { - if this == nil { - return "nil" + +func (m *NodeSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SerializedReference{`, - `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Service) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Service{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceAccount) String() string { - if this == nil { - return "nil" + +func (m *NodeSelectorRequirement) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccount{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `ImagePullSecrets:` 
+ strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccountList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceAccountList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceAccountTokenProjection) String() string { - if this == nil { - return "nil" + +func (m *NodeSelectorTerm) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccountTokenProjection{`, - `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, - `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceList) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServicePort) String() string { - if this == nil { - return "nil" + if len(m.MatchFields) > 0 { + for _, e := range m.MatchFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServicePort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceProxyOptions) String() string { - if this == nil { - return "nil" + +func (m *NodeSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DoNotUse_ExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) + if 
m.ConfigSource != nil { + l = m.ConfigSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + if len(m.PodCIDRs) > 0 { + for _, s := range m.PodCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForSelector += "}" - s := strings.Join([]string{`&ServiceSpec{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, - `Selector:` + mapStringForSelector + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, - `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, - `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, - `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, - `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, - `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, - `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, - `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceStatus) String() string { - if this == nil { - return "nil" + +func (m *NodeStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SessionAffinityConfig) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&SessionAffinityConfig{`, - `ClientIP:` + strings.Replace(fmt.Sprintf("%v", this.ClientIP), "ClientIPConfig", "ClientIPConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSVolumeSource) String() string { - if this == nil { - return "nil" + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&StorageOSVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Sysctl) String() string { - if this == nil { - return "nil" + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Sysctl{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TCPSocketAction) String() string { - if this == nil { - return "nil" + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&TCPSocketAction{`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *Taint) String() string { - if this == nil { - return "nil" + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Taint{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Toleration) String() string { - if this == nil { - return "nil" + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Toleration{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, - `}`, - }, "") - return s + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *TopologySelectorLabelRequirement) String() string { - if this == nil { - return "nil" + +func (m *NodeSystemInfo) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *TopologySelectorTerm) String() string { - if this == nil { - return "nil" + +func (m *ObjectFieldSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorTerm{`, - `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *TypedLocalObjectReference) String() string { - if this == nil { - return "nil" + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TypedLocalObjectReference{`, - `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Volume) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolume) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Volume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeDevice) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaim) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeDevice{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeMount) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeMount{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, - `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeNodeAffinity) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeNodeAffinity{`, - `Required:` + strings.Replace(fmt.Sprintf("%v", this.Required), "NodeSelector", "NodeSelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VolumeProjection) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeProjection{`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, - `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DataSource != nil { + l = m.DataSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *VolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeSource{`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", 
"ISCSIVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, - `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VsphereVirtualDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, - `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, - `FSType:` + 
fmt.Sprintf("%v", this.FSType) + `,`, - `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, - `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *WeightedPodAffinityTerm) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&WeightedPodAffinityTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + +func (m *PersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + 
len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + 
l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDNSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodExecOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n +} + +func (m *PodPortForwardOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodReadinessGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if len(m.Sysctls) > 0 { + for _, e := range m.Sysctls { + l = e.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ShareProcessNamespace != nil { + n += 3 + } + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.EnableServiceLinks != nil { + n += 3 + } + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Overhead) > 0 { + for k, v := range m.Overhead { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.TopologySpreadConstraints) > 0 { + for _, e := range m.TopologySpreadConstraints { + l 
= e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.NominatedNodeName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PodIPs) > 0 { + for _, e := range m.PodIPs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainerStatuses) > 0 { + for _, e := range m.EphemeralContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PortworxVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferAvoidPodsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n 
+} + +func (m *Probe) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *ProjectedVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *QuobyteVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tenant) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RBDPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ScopeSelector != nil { + l = m.ScopeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for 
k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScopeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScopedResourceSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScopeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Secret) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m 
*SecretEnvSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 2 + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if m.ProcMount != nil { + l = len(*m.ProcMount) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SerializedReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 
2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountTokenProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + n += 2 + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPFamily != nil { + l = len(*m.IPFamily) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SessionAffinityConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 
+ l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Sysctl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Toleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *TopologySelectorLabelRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySelectorTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchLabelExpressions) > 0 { + for _, e := range m.MatchLabelExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySpreadConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.MaxSkew)) + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenUnsatisfiable) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypedLocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*VolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.SubPathExpr) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Required != nil { + l = m.Required.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if 
m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WindowsSecurityContextOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GMSACredentialSpecName != nil { + l = len(*m.GMSACredentialSpecName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GMSACredentialSpec != nil { + l = len(*m.GMSACredentialSpec) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUserName != nil { + l = len(*m.RunAsUserName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(this.PodAffinity.String(), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(this.PodAntiAffinity.String(), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferAvoidPods := "[]PreferAvoidPodsEntry{" + for _, f := range this.PreferAvoidPods { + repeatedStringForPreferAvoidPods += strings.Replace(strings.Replace(f.String(), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferAvoidPods += "}" + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + repeatedStringForPreferAvoidPods + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + 
fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFilePersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `ControllerPublishSecretRef:` + strings.Replace(this.ControllerPublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + 
mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderPersistentVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClientIPConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClientIPConfig{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ComponentCondition{" + for _, f := range 
this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ComponentStatus{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForBinaryData := make([]string, 0, len(this.BinaryData)) + for k := range this.BinaryData { + keysForBinaryData = append(keysForBinaryData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + mapStringForBinaryData := "map[string][]byte{" + for _, k := range keysForBinaryData { + mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + } + mapStringForBinaryData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `BinaryData:` + mapStringForBinaryData + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ConfigMap{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) 
+ `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapNodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + 
`,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(this.Waiting.String(), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(this.Running.String(), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(this.Terminated.String(), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + 
fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Started:` + valueToStringGenerated(this.Started) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(this.TargetRef.String(), 
"ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + repeatedStringForAddresses := "[]EndpointAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForNotReadyAddresses := "[]EndpointAddress{" + for _, f := range this.NotReadyAddresses { + repeatedStringForNotReadyAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForNotReadyAddresses += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + repeatedStringForAddresses + `,`, + `NotReadyAddresses:` + repeatedStringForNotReadyAddresses + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubsets := "[]EndpointSubset{" + for _, f := range this.Subsets { + repeatedStringForSubsets += strings.Replace(strings.Replace(f.String(), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + "," + } + repeatedStringForSubsets += "}" + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + repeatedStringForSubsets + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Endpoints{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Endpoints", "Endpoints", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "EnvVarSource", "EnvVarSource", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EphemeralContainer{`, + `EphemeralContainerCommon:` + strings.Replace(strings.Replace(this.EphemeralContainerCommon.String(), "EphemeralContainerCommon", "EphemeralContainerCommon", 1), `&`, ``, 1) + `,`, + `TargetContainerName:` + fmt.Sprintf("%v", this.TargetContainerName) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainerCommon) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&EphemeralContainerCommon{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + 
strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainers) String() string { + if this == nil { + return "nil" + } + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + s := strings.Join([]string{`&EphemeralContainers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(this.Related.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + 
`LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, + `}`, + }, "") + return s +} +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) 
+ `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForHTTPHeaders := "[]HTTPHeader{" + for _, f := range this.HTTPHeaders { + repeatedStringForHTTPHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForHTTPHeaders += "}" + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + repeatedStringForHTTPHeaders + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostAlias) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + 
`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(this.PostStart.String(), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(this.PreStop.String(), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += 
fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LimitRange{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LimitRange", "LimitRange", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForLimits := "[]LimitRangeItem{" + for _, f := range this.Limits { + repeatedStringForLimits += strings.Replace(strings.Replace(f.String(), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + "," + } + repeatedStringForLimits += "}" + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + repeatedStringForLimits + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]LoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "LoadBalancerIngress", 
"LoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Namespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NamespaceCondition", "NamespaceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]PreferredSchedulingTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(this.RequiredDuringSchedulingIgnoredDuringExecution.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigSource{`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigStatus{`, + `Assigned:` + strings.Replace(this.Assigned.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Active:` + strings.Replace(this.Active.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `LastKnownGood:` + strings.Replace(this.LastKnownGood.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Node{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "Node", "Node", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForNodeSelectorTerms := "[]NodeSelectorTerm{" + for _, f := range this.NodeSelectorTerms { + repeatedStringForNodeSelectorTerms += strings.Replace(strings.Replace(f.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForNodeSelectorTerms += "}" + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + repeatedStringForNodeSelectorTerms + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]NodeSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + repeatedStringForMatchFields := "[]NodeSelectorRequirement{" + for _, f := range this.MatchFields { + repeatedStringForMatchFields += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchFields += "}" + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `MatchFields:` + repeatedStringForMatchFields + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTaints := "[]Taint{" + for _, f := range this.Taints { + repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "Taint", "Taint", 1), `&`, ``, 1) + "," + } + repeatedStringForTaints += "}" + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", 
this.Unschedulable) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `ConfigSource:` + strings.Replace(this.ConfigSource.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NodeCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForAddresses := "[]NodeAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForImages := "[]ContainerImage{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForVolumesAttached := "[]AttachedVolume{" + for _, f := range this.VolumesAttached { + repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumesAttached += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Addresses:` + repeatedStringForAddresses + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, + `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", 
this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolumeClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PersistentVolumeClaimCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolume{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", 
"AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, + `Local:` + strings.Replace(this.Local.String(), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(this.ClaimRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) 
+ `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), 
"PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]PodDNSConfigOption{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(strings.Replace(f.String(), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodIP{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { 
+ if this == nil { + return "nil" + } + repeatedStringForItems := "[]Pod{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pod", "Pod", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + repeatedStringForSysctls := "[]Sysctl{" + for _, f := range this.Sysctls { + repeatedStringForSysctls += strings.Replace(strings.Replace(f.String(), "Sysctl", "Sysctl", 1), `&`, ``, 1) + "," + } + repeatedStringForSysctls += "}" + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `Sysctls:` + repeatedStringForSysctls + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForContainers := "[]Container{" + for _, f := 
range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := range this.InitContainers { + repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForHostAliases := "[]HostAlias{" + for _, f := range this.HostAliases { + repeatedStringForHostAliases += strings.Replace(strings.Replace(f.String(), "HostAlias", "HostAlias", 1), `&`, ``, 1) + "," + } + repeatedStringForHostAliases += "}" + repeatedStringForReadinessGates := "[]PodReadinessGate{" + for _, f := range this.ReadinessGates { + repeatedStringForReadinessGates += strings.Replace(strings.Replace(f.String(), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + "," + } + repeatedStringForReadinessGates += "}" + repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" + for _, f := range this.TopologySpreadConstraints { + repeatedStringForTopologySpreadConstraints += strings.Replace(strings.Replace(f.String(), "TopologySpreadConstraint", "TopologySpreadConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForTopologySpreadConstraints += "}" + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + keysForOverhead := make([]string, 0, len(this.Overhead)) + for k := range this.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + mapStringForOverhead := "ResourceList{" + for _, k := range keysForOverhead { + mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)]) + } + mapStringForOverhead += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + 
`NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(this.Affinity.String(), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `HostAliases:` + repeatedStringForHostAliases + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(this.DNSConfig.String(), "PodDNSConfig", "PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `ReadinessGates:` + repeatedStringForReadinessGates + `,`, + `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `Overhead:` + mapStringForOverhead + `,`, + `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PodCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForContainerStatuses := "[]ContainerStatus{" + for _, f := range this.ContainerStatuses { + repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForContainerStatuses += "}" + repeatedStringForInitContainerStatuses := "[]ContainerStatus{" + for _, f := range this.InitContainerStatuses { + repeatedStringForInitContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainerStatuses += "}" + repeatedStringForPodIPs := "[]PodIP{" + for _, f := range this.PodIPs { + repeatedStringForPodIPs += strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + "," + } + repeatedStringForPodIPs += "}" + repeatedStringForEphemeralContainerStatuses := "[]ContainerStatus{" + for _, f := range this.EphemeralContainerStatuses { + repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainerStatuses += "}" + s := 
strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `ContainerStatuses:` + repeatedStringForContainerStatuses + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + repeatedStringForInitContainerStatuses + `,`, + `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, + `PodIPs:` + repeatedStringForPodIPs + `,`, + `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + 
`,`, + `EvictionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvictionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForSources := "[]VolumeProjection{" + for _, f := range this.Sources { + repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + "," + } + repeatedStringForSources += "}" + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + repeatedStringForSources + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `}`, + }, "") + return s +} +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", 
"LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ReplicationController{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ReplicationControllerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + "," + } 
+ repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Divisor), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `ScopeSelector:` + strings.Replace(this.ScopeSelector.String(), "ScopeSelector", "ScopeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k 
:= range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]ScopedResourceSelectorRequirement{" + for _, f := range this.MatchExpressions 
{ + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + s := strings.Join([]string{`&ScopeSelector{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `}`, + }, "") + return s +} +func (this *ScopedResourceSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, + `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(this.Capabilities.String(), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + repeatedStringForSecrets := "[]ObjectReference{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + "," + } + 
repeatedStringForSecrets += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceAccount{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Service{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ServicePort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + repeatedStringForPorts + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, + `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, + `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SessionAffinityConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionAffinityConfig{`, + `ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, 
+ }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorLabelRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchLabelExpressions := "[]TopologySelectorLabelRequirement{" + for _, f := range this.MatchLabelExpressions { + repeatedStringForMatchLabelExpressions += strings.Replace(strings.Replace(f.String(), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchLabelExpressions += "}" + s := strings.Join([]string{`&TopologySelectorTerm{`, + `MatchLabelExpressions:` + repeatedStringForMatchLabelExpressions + `,`, + `}`, + }, "") + return s +} +func (this *TopologySpreadConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySpreadConstraint{`, + `MaxSkew:` + fmt.Sprintf("%v", this.MaxSkew) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TypedLocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypedLocalObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + 
`MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeAffinity{`, + `Required:` + strings.Replace(this.Required.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(this.EmptyDir.String(), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(this.GitRepo.String(), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", 
"VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(this.Projected.String(), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsSecurityContextOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsSecurityContextOptions{`, + `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, + `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, + `RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := 
m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF 
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if 
intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerPublishSecretRef == nil { + m.ControllerPublishSecretRef = &SecretReference{} + } + if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeStageSecretRef == nil { + m.NodeStageSecretRef = &SecretReference{} + } + if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &SecretReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerExpandSecretRef == nil { + m.ControllerExpandSecretRef = &SecretReference{} + } + if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &LocalObjectReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capabilities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17541,7 +29113,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17549,15 +29121,15 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17569,7 +29141,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17579,14 +29151,17 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17598,7 +29173,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-17608,35 +29183,19 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17646,67 +29205,29 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Affinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Affinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17716,28 +29237,27 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeAffinity == nil { - m.NodeAffinity = &NodeAffinity{} - } - if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SecretFile = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17749,7 +29269,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17758,21 +29278,24 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodAffinity == nil { - m.PodAffinity = &PodAffinity{} + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} } - if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17782,25 +29305,12 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAntiAffinity == nil { - m.PodAntiAffinity = &PodAntiAffinity{} - } - if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17810,6 +29320,9 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -17822,7 +29335,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *AttachedVolume) Unmarshal(dAtA []byte) error { +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17837,7 +29350,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17845,15 +29358,15 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17865,7 +29378,7 @@ 
func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17875,14 +29388,17 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17894,7 +29410,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17904,64 +29420,37 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AvoidPods) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ReadOnly = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17973,7 +29462,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17982,11 +29471,16 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := 
m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17999,6 +29493,9 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18011,7 +29508,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18026,7 +29523,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18034,15 +29531,15 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18054,7 +29551,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18064,14 +29561,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DiskName = string(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18083,7 +29583,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18093,16 +29593,19 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ 
-18112,27 +29615,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) - m.CachingMode = &s - iNdEx = postIndex + m.ReadOnly = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18142,48 +29635,86 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - b := bool(v != 0) - m.ReadOnly = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated } - var stringLen uint64 + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18193,22 +29724,12 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int32(b&0x7F) << 
shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex + m.TimeoutSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18218,6 +29739,9 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18230,7 +29754,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18245,7 +29769,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18253,15 +29777,15 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18273,7 +29797,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18283,14 +29807,17 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18302,7 +29829,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18312,16 +29839,19 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = 
%d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18331,15 +29861,27 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18351,7 +29893,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18361,11 +29903,13 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SecretNamespace = &s + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -18376,6 +29920,9 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18388,7 +29935,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18403,7 +29950,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18411,17 +29958,17 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18431,26 +29978,30 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18460,41 +30011,26 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ReadOnly = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18504,6 +30040,9 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18516,7 +30055,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Binding) Unmarshal(dAtA []byte) error { +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18531,7 +30070,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18539,15 +30078,15 @@ func (m *Binding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18559,7 +30098,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18568,16 +30107,19 @@ func (m *Binding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18589,7 +30131,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18598,10 +30140,14 @@ func (m *Binding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18614,6 +30160,9 @@ func (m *Binding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18626,7 +30175,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } -func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMap) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18641,7 +30190,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18649,17 +30198,17 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18669,102 +30218,28 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field VolumeHandle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18776,7 +30251,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18785,11 +30260,14 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) + if m.Data == nil { + m.Data = make(map[string]string) } var mapkey string var mapvalue string @@ -18805,7 +30283,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18822,7 +30300,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18832,6 +30310,9 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -18848,7 +30329,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << 
shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18858,6 +30339,9 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -18878,77 +30362,11 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.VolumeAttributes[mapkey] = mapvalue - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControllerPublishSecretRef == nil { - m.ControllerPublishSecretRef = &SecretReference{} - } - if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeStageSecretRef == nil { - m.NodeStageSecretRef = &SecretReference{} - } - if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Data[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18960,7 +30378,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18969,177 +30387,17 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &SecretReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) + if m.BinaryData == nil { + m.BinaryData = make(map[string][]byte) } var mapkey string - var mapvalue string + mapvalue := []byte{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -19152,7 +30410,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19169,7 +30427,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19179,13 +30437,16 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { 
+ return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapbyteLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19195,21 +30456,25 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + mapbyteLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } + if postbytesIndex > l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19225,40 +30490,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.VolumeAttributes[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &LocalObjectReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.BinaryData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -19269,6 +30501,9 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19281,7 +30516,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Capabilities) Unmarshal(dAtA []byte) error { +func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19296,7 +30531,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19304,17 +30539,17 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19324,26 +30559,30 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19353,21 +30592,13 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19377,6 +30608,9 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19389,7 +30623,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19404,7 +30638,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19412,17 +30646,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19432,24 +30666,28 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19461,7 +30699,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19471,16 +30709,19 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19490,26 +30731,71 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 
64 { return ErrIntOverflowGenerated @@ -19519,24 +30805,28 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19548,7 +30838,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19557,36 +30847,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19596,6 +30867,9 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19608,7 +30882,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19623,7 +30897,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19631,15 +30905,15 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19651,7 +30925,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19661,14 +30935,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19680,7 +30957,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19690,14 +30967,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19709,7 +30989,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19719,14 +30999,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19738,7 +31021,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19748,16 +31031,19 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) } - var msglen int + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19767,45 +31053,24 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19815,6 +31080,9 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19827,7 +31095,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19842,7 +31110,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19850,17 +31118,17 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19870,26 +31138,30 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := 
m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19899,24 +31171,29 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -19928,45 +31205,13 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19976,6 +31221,9 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19988,7 +31236,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20003,7 +31251,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20011,17 +31259,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal 
tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20031,26 +31279,30 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20060,26 +31312,31 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20089,17 +31346,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + m.DefaultMode = &v case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20109,25 +31366,13 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := 
bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20137,6 +31382,9 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -20149,7 +31397,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { +func (m *Container) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20164,7 +31412,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20172,17 +31420,17 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + return fmt.Errorf("proto: Container: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20192,65 +31440,27 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20262,7 +31472,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20272,14 +31482,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20291,7 +31504,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20301,14 +31514,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20320,7 +31536,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20330,14 +31546,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20349,7 +31568,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20359,64 +31578,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20428,7 +31600,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20437,16 +31609,20 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20458,7 +31634,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20467,67 +31643,20 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20539,7 +31668,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20548,16 +31677,19 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20569,7 +31701,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20578,67 +31710,56 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20650,7 +31771,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20659,16 +31780,22 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20680,7 +31807,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20689,106 +31816,24 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Data == nil { - m.Data = make(map[string]string) + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Data[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20798,164 +31843,59 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.BinaryData == nil { - m.BinaryData = make(map[string][]byte) + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.BinaryData[mapkey] = mapvalue - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20967,7 +31907,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20976,16 +31916,22 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 16: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -20997,66 +31943,55 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21068,7 +32003,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21077,16 +32012,20 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21098,7 +32037,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21108,16 +32047,53 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := 
m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21127,13 +32103,28 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} + } + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21143,6 +32134,9 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21155,7 +32149,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapList) Unmarshal(dAtA []byte) error { +func (m *ContainerImage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21170,7 +32164,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21178,17 +32172,17 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21198,27 +32192,29 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SizeBytes", wireType) } - var msglen int + m.SizeBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21228,23 +32224,11 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.SizeBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21254,6 +32238,9 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21266,7 +32253,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { +func (m *ContainerPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21281,7 +32268,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21289,15 +32276,15 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21309,7 +32296,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21319,16 +32306,19 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) } - var stringLen uint64 + m.HostPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21338,26 +32328,16 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.HostPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) } - var stringLen uint64 + m.ContainerPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21367,24 +32347,14 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ContainerPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21396,7 +32366,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21406,14 +32376,17 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21425,7 +32398,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21435,10 +32408,13 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) + m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -21449,6 +32425,9 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21461,7 +32440,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { +func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21476,7 +32455,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21484,15 +32463,15 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) 
wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21504,7 +32483,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21513,16 +32492,22 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21534,7 +32519,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21543,19 +32528,24 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21565,13 +32555,28 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21581,6 +32586,9 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21593,7 +32601,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21608,7 +32616,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21616,15 +32624,15 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21636,7 +32644,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21645,85 +32653,16 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b default: iNdEx = 
preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21733,6 +32672,9 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21745,7 +32687,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Container) Unmarshal(dAtA []byte) error { +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21760,7 +32702,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21768,17 +32710,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) } - var stringLen uint64 + m.ExitCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21788,26 +32730,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ExitCode |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) } - var stringLen uint64 + m.Signal = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21817,24 +32749,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Signal |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21846,7 +32768,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21856,14 +32778,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21875,7 +32800,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21885,43 +32810,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkingDir = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21933,7 +32832,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21942,48 +32841,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21995,7 +32865,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if 
b < 0x80 { break } @@ -22004,49 +32874,21 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22056,63 +32898,82 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LivenessProbe == nil { - m.LivenessProbe = &Probe{} - } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - if msglen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &Probe{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 12: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22122,28 +32983,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lifecycle == nil { - m.Lifecycle = &Lifecycle{} - } - if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22155,7 +33015,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22165,14 +33025,70 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22184,7 +33100,7 @@ func (m *Container) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22194,14 +33110,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22213,7 +33132,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22222,21 +33141,21 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &SecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22246,15 +33165,28 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Stdin = bool(v != 0) - case 17: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -22266,17 +33198,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - m.StdinOnce = bool(v != 0) - case 18: + m.Ready = bool(v != 0) + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) } - var v int + m.RestartCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22286,17 +33218,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + m.RestartCount |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.TTY = bool(v != 0) - case 19: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22306,26 +33237,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 20: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22337,7 +33269,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22347,16 +33279,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + m.ImageID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22366,23 +33301,45 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Started = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22392,6 +33349,9 @@ func (m *Container) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22404,7 +33364,7 @@ func (m 
*Container) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerImage) Unmarshal(dAtA []byte) error { +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22419,7 +33379,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22427,46 +33387,17 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - m.SizeBytes = 0 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22476,7 +33407,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SizeBytes |= (int64(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -22490,6 +33421,9 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22502,7 +33436,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerPort) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22517,7 +33451,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22525,17 +33459,17 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } 
- var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22545,116 +33479,25 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) - } - m.HostPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HostPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) - } - m.ContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ContainerPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -22665,6 +33508,9 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22677,7 +33523,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerState) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22692,7 +33538,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -22700,17 +33546,17 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22720,28 +33566,27 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Waiting == nil { - m.Waiting = &ContainerStateWaiting{} - } - if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22753,7 +33598,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22762,19 +33607,22 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Running == nil { - m.Running = &ContainerStateRunning{} + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} } - if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22786,7 +33634,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22795,16 +33643,39 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Terminated == nil { - m.Terminated = &ContainerStateTerminated{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22814,6 +33685,9 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22826,7 +33700,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22841,7 +33715,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22849,15 +33723,15 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22869,7 +33743,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22878,13 +33752,37 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22894,6 +33792,9 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22906,7 +33807,7 @@ func (m 
*ContainerStateRunning) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22921,7 +33822,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22929,17 +33830,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) } - m.ExitCode = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22949,35 +33850,29 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitCode |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22987,24 +33882,84 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SizeLimit == nil { + m.SizeLimit = &resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23016,7 +33971,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23026,14 +33981,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23045,7 +34003,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23054,18 +34012,24 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23075,25 +34039,27 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23105,7 +34071,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23115,10 +34081,14 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -23129,6 +34099,9 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23141,7 +34114,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { +func (m *EndpointPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23156,7 +34129,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23164,15 +34137,15 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23184,7 +34157,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23194,14 +34167,36 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23213,7 +34208,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23223,10 +34218,13 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -23237,6 +34235,9 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23249,7 +34250,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStatus) Unmarshal(dAtA []byte) error { +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23264,7 +34265,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23272,17 +34273,17 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23292,24 +34293,29 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23321,7 +34327,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 
0x80 { break } @@ -23330,16 +34336,20 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23351,7 +34361,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23360,86 +34370,75 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Ready = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - m.RestartCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RestartCount |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23449,26 +34448,30 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ImageID = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23478,20 +34481,25 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -23502,6 +34510,9 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23514,7 +34525,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { +func (m *EndpointsList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23529,7 +34540,7 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23537,17 +34548,17 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointsList: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - m.Port = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23557,62 +34568,26 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } @@ -23626,7 +34601,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23635,10 +34610,13 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) + m.Items = append(m.Items, Endpoints{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -23652,6 +34630,9 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23664,7 +34645,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23679,7 +34660,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23687,15 +34668,15 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
DownwardAPIVolumeFile: wiretype end group for non-group") + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23707,7 +34688,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23717,14 +34698,17 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23736,7 +34720,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23745,19 +34729,22 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23769,7 +34756,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23778,36 +34765,19 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
(int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23817,6 +34787,9 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23829,7 +34802,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EnvVar) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23844,7 +34817,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23852,17 +34825,17 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23872,96 +34845,27 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - 
if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23973,7 +34877,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23983,14 +34887,17 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24002,7 +34909,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24011,13 +34918,16 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SizeLimit == nil { - m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} } - if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24030,6 +34940,9 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24042,7 +34955,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointAddress) Unmarshal(dAtA []byte) error { +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24057,7 +34970,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24065,17 +34978,17 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) 
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24085,24 +34998,31 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24114,7 +35034,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24123,21 +35043,24 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24147,26 +35070,33 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24176,21 +35106,27 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -24201,6 +35137,9 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24213,7 +35152,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointPort) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24228,7 +35167,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24236,17 +35175,17 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerCommon", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24256,43 +35195,28 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.EphemeralContainerCommon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24304,7 +35228,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -24314,10 +35238,13 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + m.TargetContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -24328,6 +35255,9 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24340,7 +35270,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24355,7 +35285,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24363,17 +35293,17 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainerCommon: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainerCommon: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24383,28 +35313,29 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24414,24 +35345,121 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.NotReadyAddresses = 
append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } @@ -24445,7 +35473,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24454,67 +35482,54 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) + m.Ports = append(m.Ports, ContainerPort{}) if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Endpoints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24526,7 +35541,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24535,16 +35550,19 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24556,7 +35574,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24565,67 +35583,56 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24637,7 +35644,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24646,16 +35653,22 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24667,7 +35680,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24676,67 +35689,54 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24748,7 +35748,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24758,14 +35758,17 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24777,7 +35780,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24786,19 +35789,82 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24810,7 +35876,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24819,69 +35885,20 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24893,7 +35910,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24903,16 +35920,19 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + 
m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24922,24 +35942,29 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24951,7 +35976,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24960,13 +35985,16 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24979,6 +36007,9 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24991,7 +36022,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVarSource) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25006,7 +36037,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25014,15 +36045,15 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } 
var msglen int for shift := uint(0); ; shift += 7 { @@ -25034,7 +36065,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25043,52 +36074,19 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25100,7 +36098,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25109,46 +36107,14 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25161,6 +36127,9 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25188,7 +36157,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 
0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25216,7 +36185,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25225,6 +36194,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25246,7 +36218,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25255,6 +36227,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25276,7 +36251,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25286,6 +36261,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25305,7 +36283,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25315,6 +36293,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25334,7 +36315,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25343,6 +36324,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25364,7 +36348,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25373,6 +36357,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25394,7 +36381,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25403,6 +36390,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25424,7 +36414,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25443,7 +36433,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25453,6 +36443,9 @@ 
func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25472,7 +36465,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25481,6 +36474,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25502,7 +36498,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25511,6 +36507,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25535,7 +36534,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25545,6 +36544,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25564,7 +36566,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25573,6 +36575,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25597,7 +36602,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25607,6 +36612,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25626,7 +36634,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25636,6 +36644,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25650,6 +36661,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25677,7 +36691,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25705,7 +36719,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25714,6 +36728,9 @@ func (m *EventList) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25735,7 +36752,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25744,6 +36761,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25761,6 +36781,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25788,7 +36811,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25816,7 +36839,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25835,7 +36858,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25844,6 +36867,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25865,7 +36891,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25875,6 +36901,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25889,6 +36918,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25916,7 +36948,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25944,7 +36976,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25954,6 +36986,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25973,7 +37008,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25983,6 +37018,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -25997,6 +37035,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26024,7 +37065,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26052,7 +37093,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26062,6 +37103,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26076,6 +37120,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26103,7 +37150,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26131,7 +37178,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26141,6 +37188,9 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26160,7 +37210,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -26180,7 +37230,176 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26190,11 +37409,50 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) @@ -26209,7 +37467,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26217,144 +37475,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { m.ReadOnly = bool(v != 0) case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26366,7 +37487,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26375,59 +37496,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } @@ -26448,7 +37519,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26465,7 +37536,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26475,6 +37546,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -26491,7 +37565,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26501,6 +37575,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -26532,6 +37609,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26559,7 +37639,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26587,7 +37667,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26597,6 +37677,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26616,7 +37699,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26626,6 +37709,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26645,7 +37731,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26654,6 +37740,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26678,7 +37767,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26698,7 +37787,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26707,6 +37796,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26727,7 +37819,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26744,7 +37836,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26754,6 +37846,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -26770,7 +37865,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26780,6 +37875,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -26811,6 +37909,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26838,7 +37939,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26866,7 +37967,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26876,6 +37977,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26895,7 +37999,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26905,6 +38009,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26919,6 +38026,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26946,7 +38056,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26974,7 +38084,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26984,6 +38094,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27003,7 +38116,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27013,6 +38126,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27032,7 +38148,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift + m.Partition |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -27051,7 +38167,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27066,6 +38182,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27093,7 +38212,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27121,7 +38240,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27131,6 +38250,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27150,7 +38272,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27160,6 +38282,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27179,7 +38304,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27189,6 +38314,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27203,6 +38331,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27230,7 +38361,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27258,7 +38389,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27268,6 +38399,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27287,7 +38421,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27297,6 +38431,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27316,7 +38453,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27336,7 +38473,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27346,6 +38483,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27361,6 +38501,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27388,7 +38531,7 @@ func (m *GlusterfsVolumeSource) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27416,7 +38559,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27426,6 +38569,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27445,7 +38591,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27455,6 +38601,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27474,7 +38623,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27489,6 +38638,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27516,7 +38668,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27544,7 +38696,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27554,6 +38706,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27573,7 +38728,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27582,6 +38737,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27603,7 +38761,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27613,6 +38771,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27632,7 +38793,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27642,6 +38803,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27661,7 +38825,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27670,6 +38834,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27687,6 +38854,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27714,7 +38884,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27742,7 +38912,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27752,6 +38922,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27771,7 +38944,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27781,6 +38954,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27795,6 +38971,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27822,7 +39001,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27850,7 +39029,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27859,6 +39038,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27883,7 +39065,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27892,6 +39074,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27916,7 +39101,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27925,6 +39110,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27944,6 +39132,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27971,7 +39162,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27999,7 +39190,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28009,6 +39200,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28028,7 +39222,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28038,6 +39232,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28052,6 +39249,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28079,7 +39279,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28107,7 +39307,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28117,6 +39317,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28136,7 +39339,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28146,6 +39349,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28161,6 +39367,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28188,7 +39397,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28216,7 +39425,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -28226,6 +39435,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28245,7 +39457,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28255,6 +39467,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28274,7 +39489,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28293,7 +39508,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28303,6 +39518,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28322,7 +39540,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28332,6 +39550,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28351,7 +39572,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28371,7 +39592,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28381,6 +39602,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28400,7 +39624,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28420,7 +39644,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28429,6 +39653,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28453,7 +39680,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-28473,7 +39700,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28483,6 +39710,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28498,6 +39728,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28525,7 +39758,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28553,7 +39786,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28563,6 +39796,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28582,7 +39818,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28592,6 +39828,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28611,7 +39850,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28630,7 +39869,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28640,6 +39879,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28659,7 +39901,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28669,6 +39911,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28688,7 +39933,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28708,7 +39953,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28718,6 +39963,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28737,7 +39985,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28757,7 +40005,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28766,6 +40014,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28790,7 +40041,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28810,7 +40061,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28820,6 +40071,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28835,6 +40089,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28862,7 +40119,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28890,7 +40147,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28900,6 +40157,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28919,7 +40179,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28929,6 +40189,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28948,7 +40211,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28963,6 +40226,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28990,7 +40256,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29018,7 +40284,7 @@ func (m *Lifecycle) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29027,6 +40293,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29051,7 +40320,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29060,6 +40329,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29079,6 +40351,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29106,7 +40381,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29134,7 +40409,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29143,6 +40418,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29164,7 +40442,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29173,6 +40451,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29189,6 +40470,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29216,7 +40500,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29244,7 +40528,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29254,6 +40538,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29273,7 +40560,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29282,6 +40569,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29289,7 +40579,7 @@ func (m 
*LimitRangeItem) Unmarshal(dAtA []byte) error { m.Max = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29302,7 +40592,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29319,7 +40609,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29329,6 +40619,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29345,7 +40638,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29354,13 +40647,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29396,7 +40689,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29405,6 +40698,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29412,7 +40708,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.Min = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29425,7 +40721,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29442,7 +40738,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29452,6 +40748,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29468,7 +40767,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29477,13 +40776,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } 
if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29519,7 +40818,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29528,6 +40827,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29535,7 +40837,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.Default = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29548,7 +40850,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29565,7 +40867,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29575,6 +40877,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29591,7 +40896,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29600,13 +40905,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29642,7 +40947,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29651,6 +40956,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29658,7 +40966,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.DefaultRequest = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29671,7 +40979,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29688,7 +40996,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-29698,6 +41006,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29714,7 +41025,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29723,13 +41034,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29765,7 +41076,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29774,6 +41085,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29781,7 +41095,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.MaxLimitRequestRatio = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29794,7 +41108,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29811,7 +41125,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29821,6 +41135,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29837,7 +41154,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29846,13 +41163,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29883,6 +41200,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29910,7 +41230,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 
{ break } @@ -29938,7 +41258,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29947,6 +41267,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29968,7 +41291,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29977,6 +41300,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29994,6 +41320,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30021,7 +41350,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30049,7 +41378,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30058,6 +41387,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30075,6 +41407,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30102,7 +41437,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30130,7 +41465,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30139,6 +41474,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30160,7 +41498,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30169,10 +41507,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -30186,6 +41527,9 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l 
{ return io.ErrUnexpectedEOF } @@ -30213,7 +41557,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30241,7 +41585,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30251,6 +41595,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30270,7 +41617,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30280,6 +41627,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30294,6 +41644,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30321,7 +41674,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30349,7 +41702,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30358,6 +41711,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30375,6 +41731,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30402,7 +41761,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30430,7 +41789,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30440,6 +41799,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30454,6 +41816,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30481,7 +41846,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30509,7 +41874,157 @@ func (m 
*LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30519,16 +42034,19 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30538,22 +42056,12 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30563,6 +42071,9 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30575,7 +42086,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *Namespace) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30590,7 +42101,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30598,17 +42109,17 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30618,26 +42129,30 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30647,26 +42162,30 @@ func (m 
*NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30676,12 +42195,25 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30691,6 +42223,9 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30703,7 +42238,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Namespace) Unmarshal(dAtA []byte) error { +func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30718,7 +42253,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30726,17 +42261,17 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30746,25 +42281,59 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = NamespaceConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30776,7 +42345,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30785,18 +42354,21 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30806,21 +42378,55 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -30831,6 +42437,9 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30858,7 +42467,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30886,7 +42495,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30895,6 +42504,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30916,7 +42528,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30925,6 +42537,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30942,6 +42557,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30969,7 +42587,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30997,7 +42615,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31007,6 +42625,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31021,6 +42642,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31048,7 +42672,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31076,7 +42700,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31086,11 +42710,48 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31100,6 +42761,9 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31127,7 +42791,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31155,7 +42819,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31164,6 +42828,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31185,7 +42852,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31194,6 +42861,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31215,7 +42885,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31224,6 +42894,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31240,6 +42913,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31267,7 +42943,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31295,7 +42971,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31305,6 +42981,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31324,7 +43003,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31334,6 +43013,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31348,6 +43030,9 @@ func (m *NodeAddress) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31375,7 +43060,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31403,7 +43088,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31412,6 +43097,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31436,7 +43124,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31445,6 +43133,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31462,6 +43153,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31489,7 +43183,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31517,7 +43211,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31527,6 +43221,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31546,7 +43243,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31556,6 +43253,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31575,7 +43275,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31584,6 +43284,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31605,7 +43308,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31614,6 +43317,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -31635,7 +43341,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31645,6 +43351,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31664,7 +43373,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31674,6 +43383,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31688,6 +43400,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31715,7 +43430,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31743,7 +43458,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31752,6 +43467,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31771,6 +43489,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31798,7 +43519,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31826,7 +43547,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31835,6 +43556,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31859,7 +43583,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31868,6 +43592,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31892,7 +43619,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31901,6 +43628,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31925,7 +43655,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31935,6 +43665,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31949,6 +43682,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31976,7 +43712,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32004,7 +43740,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32013,6 +43749,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32029,6 +43768,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32056,7 +43798,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32084,7 +43826,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32093,6 +43835,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32114,7 +43859,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32123,6 +43868,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32140,6 +43888,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32167,7 +43918,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32195,7 +43946,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32205,6 +43956,9 @@ func (m 
*NodeProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32219,6 +43973,9 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32246,7 +44003,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32274,7 +44031,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32283,6 +44040,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32290,7 +44050,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -32303,7 +44063,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32320,7 +44080,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32330,6 +44090,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -32346,7 +44109,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32355,13 +44118,13 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -32392,6 +44155,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32419,7 +44185,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32447,7 +44213,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32456,6 +44222,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32473,6 +44242,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32500,7 +44272,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32528,7 +44300,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32538,6 +44310,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32557,7 +44332,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32567,6 +44342,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32586,7 +44364,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32596,6 +44374,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32610,6 +44391,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32637,7 +44421,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32665,7 +44449,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32674,6 +44458,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32696,7 +44483,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32705,6 +44492,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32722,6 +44512,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32749,7 +44542,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32777,7 +44570,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32787,6 +44580,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32806,7 +44602,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32816,6 +44612,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32835,7 +44634,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32845,6 +44644,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32864,7 +44666,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32884,7 +44686,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32893,6 +44695,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32915,7 +44720,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32924,6 +44729,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32934,6 +44742,38 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDRs = append(m.PodCIDRs, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32943,6 +44783,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32970,7 +44813,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32998,7 +44841,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33007,6 +44850,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33014,7 +44860,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -33027,7 +44873,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33044,7 +44890,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33054,6 +44900,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -33070,7 +44919,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33079,13 +44928,13 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -33121,7 +44970,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33130,6 +44979,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33137,7 +44989,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { m.Allocatable = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -33150,7 +45002,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + 
wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33167,7 +45019,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33177,6 +45029,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -33193,7 +45048,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33202,13 +45057,13 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -33244,7 +45099,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33254,6 +45109,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33273,7 +45131,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33282,6 +45140,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33304,7 +45165,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33313,6 +45174,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33335,7 +45199,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33344,6 +45208,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33365,7 +45232,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33374,6 +45241,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33395,7 +45265,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33404,6 +45274,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33426,7 +45299,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33436,6 +45309,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33455,7 +45331,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33464,6 +45340,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33486,7 +45365,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33495,6 +45374,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33514,6 +45396,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33541,7 +45426,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33569,7 +45454,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33579,6 +45464,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33598,7 +45486,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33608,6 +45496,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33627,7 +45518,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33637,6 +45528,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-33656,7 +45550,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33666,6 +45560,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33685,7 +45582,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33695,6 +45592,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33714,7 +45614,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33724,6 +45624,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33743,7 +45646,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33753,6 +45656,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33772,7 +45678,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33782,6 +45688,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33801,7 +45710,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33811,6 +45720,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33830,7 +45742,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33840,6 +45752,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33854,6 +45769,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33881,7 +45799,7 @@ func 
(m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33909,7 +45827,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33919,6 +45837,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33938,7 +45859,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33948,6 +45869,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33962,6 +45886,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33989,7 +45916,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34017,7 +45944,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34027,6 +45954,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34046,7 +45976,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34056,6 +45986,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34075,7 +46008,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34085,6 +46018,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34104,7 +46040,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34114,6 +46050,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34133,7 +46072,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34143,6 +46082,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34162,7 +46104,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34172,6 +46114,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34191,7 +46136,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34201,6 +46146,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34215,6 +46163,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34242,7 +46193,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34270,7 +46221,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34279,6 +46230,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34300,7 +46254,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34309,6 +46263,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34330,7 +46287,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34339,6 +46296,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34355,6 +46315,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34382,7 +46345,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift 
+ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34410,7 +46373,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34419,6 +46382,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34440,7 +46406,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34449,6 +46415,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34470,7 +46439,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34479,6 +46448,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34495,6 +46467,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34522,7 +46497,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34550,7 +46525,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34560,6 +46535,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34579,7 +46557,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34589,6 +46567,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34608,7 +46589,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34617,6 +46598,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34638,7 +46622,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34647,6 +46631,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34668,7 +46655,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34678,6 +46665,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34697,7 +46687,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34707,6 +46697,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34721,6 +46714,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34748,7 +46744,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34776,7 +46772,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34785,6 +46781,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34806,7 +46805,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34815,6 +46814,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34832,6 +46834,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34859,7 +46864,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34887,7 +46892,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34897,6 +46902,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34916,7 +46924,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34925,6 +46933,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34946,7 +46957,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34956,6 +46967,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34975,7 +46989,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34984,11 +46998,14 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -35008,7 +47025,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35018,6 +47035,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35038,7 +47058,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35048,6 +47068,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35068,7 +47091,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35077,6 +47100,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35096,6 +47122,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35123,7 
+47152,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35151,7 +47180,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35161,6 +47190,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35180,7 +47212,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35190,6 +47222,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35209,7 +47244,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35218,6 +47253,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35225,7 +47263,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -35238,7 +47276,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35255,7 +47293,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35265,6 +47303,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -35281,7 +47322,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35290,13 +47331,13 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -35332,7 +47373,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35341,6 +47382,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35358,6 +47402,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35385,7 +47432,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35413,7 +47460,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35423,6 +47470,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35442,7 +47492,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35457,6 +47507,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35484,7 +47537,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35512,7 +47565,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35521,6 +47574,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35542,7 +47598,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35551,6 +47607,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35568,6 +47627,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35595,7 +47657,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35623,7 +47685,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35632,6 +47694,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35656,7 +47721,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35665,6 +47730,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35689,7 +47757,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35698,6 +47766,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35722,7 +47793,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35731,6 +47802,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35755,7 +47829,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35764,6 +47838,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35788,7 +47865,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35797,6 +47874,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35821,7 +47901,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35830,6 +47910,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35854,7 +47937,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35863,6 +47946,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -35887,7 +47973,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35896,6 +47982,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35920,7 +48009,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35929,6 +48018,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35953,7 +48045,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35962,6 +48054,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35986,7 +48081,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35995,6 +48090,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36019,7 +48117,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36028,6 +48126,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36052,7 +48153,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36061,6 +48162,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36085,7 +48189,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36094,6 +48198,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36118,7 +48225,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36127,6 +48234,9 @@ func (m 
*PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36151,7 +48261,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36160,6 +48270,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36184,7 +48297,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36193,6 +48306,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36217,7 +48333,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36226,6 +48342,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36250,7 +48369,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36259,6 +48378,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36283,7 +48405,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36292,6 +48414,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36316,7 +48441,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36325,6 +48450,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36344,6 +48472,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36371,7 +48502,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36399,7 +48530,7 @@ func (m *PersistentVolumeSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36408,6 +48539,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36415,7 +48549,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -36428,7 +48562,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36445,7 +48579,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36455,6 +48589,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -36471,7 +48608,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36480,13 +48617,13 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -36522,7 +48659,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36531,6 +48668,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36552,7 +48692,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36562,6 +48702,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36581,7 +48724,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36590,6 +48733,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36614,7 +48760,7 
@@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36624,6 +48770,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36643,7 +48792,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36653,6 +48802,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36672,7 +48824,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36682,6 +48834,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36701,7 +48856,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36711,6 +48866,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36731,7 +48889,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36740,6 +48898,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36759,6 +48920,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36786,7 +48950,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36814,7 +48978,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36824,6 +48988,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36843,7 +49010,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36853,6 +49020,9 
@@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36872,7 +49042,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36882,6 +49052,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36896,6 +49069,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36923,7 +49099,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36951,7 +49127,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36961,6 +49137,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36980,7 +49159,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36990,6 +49169,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37004,6 +49186,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37031,7 +49216,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37059,7 +49244,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37068,6 +49253,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37089,7 +49277,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37098,6 +49286,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37119,7 +49310,7 
@@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37128,6 +49319,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37144,6 +49338,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37171,7 +49368,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37199,7 +49396,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37208,6 +49405,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37230,7 +49430,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37239,6 +49439,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37256,6 +49459,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37283,7 +49489,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37311,7 +49517,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37320,11 +49526,14 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.LabelSelector = &v1.LabelSelector{} } if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -37344,7 +49553,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37354,6 +49563,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37373,7 +49585,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-37383,6 +49595,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37397,6 +49612,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37424,7 +49642,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37452,7 +49670,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37461,6 +49679,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37483,7 +49704,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37492,6 +49713,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37509,6 +49733,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37536,7 +49763,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37564,7 +49791,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37584,7 +49811,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37604,7 +49831,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37624,7 +49851,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37644,7 +49871,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37654,6 +49881,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37668,6 +49898,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -37695,7 +49928,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37723,7 +49956,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37733,6 +49966,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37752,7 +49988,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37762,6 +49998,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37781,7 +50020,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37790,6 +50029,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37811,7 +50053,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37820,6 +50062,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37841,7 +50086,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37851,6 +50096,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37870,7 +50118,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37880,6 +50128,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37894,6 +50145,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37921,7 +50175,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37949,7 +50203,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37959,6 +50213,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37978,7 +50235,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37988,6 +50245,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38007,7 +50267,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38016,6 +50276,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38033,6 +50296,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38060,7 +50326,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38088,7 +50354,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38098,6 +50364,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38117,7 +50386,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38127,6 +50396,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38142,6 +50414,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38169,7 +50444,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38197,7 +50472,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38217,7 +50492,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38237,7 +50512,7 @@ func (m *PodExecOptions) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38257,7 +50532,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38277,7 +50552,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38287,6 +50562,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38306,7 +50584,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38316,6 +50594,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38330,6 +50611,94 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38357,7 +50726,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38385,7 +50754,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38394,6 +50763,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38415,7 +50787,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38424,6 +50796,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38441,6 +50816,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38468,7 +50846,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38496,7 +50874,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38506,6 +50884,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38525,7 +50906,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38545,7 +50926,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38565,7 +50946,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38585,7 +50966,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38594,11 +50975,14 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.SinceTime = &v1.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38618,7 +51002,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38638,7 +51022,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38658,7 +51042,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38673,6 +51057,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38700,7 +51087,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38726,7 +51113,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38743,7 +51130,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38752,9 +51139,23 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]int32, 0, elementCount) + } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { @@ -38766,7 +51167,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38785,6 +51186,9 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38812,7 +51216,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38840,7 +51244,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38850,6 +51254,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38864,6 +51271,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38891,7 +51301,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38919,7 +51329,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38929,6 +51339,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38943,6 +51356,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx 
+ skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38970,7 +51386,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38998,7 +51414,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39007,6 +51423,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39031,7 +51450,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39051,7 +51470,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39070,7 +51489,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39087,7 +51506,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39096,9 +51515,23 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } for iNdEx < postIndex { var v int64 for shift := uint(0); ; shift += 7 { @@ -39110,7 +51543,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39134,7 +51567,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39154,7 +51587,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39174,7 +51607,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39183,6 +51616,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39191,6 +51627,42 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -39200,6 +51672,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39227,7 +51702,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39255,7 +51730,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39264,11 +51739,14 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodController == nil { - m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} + m.PodController = &v1.OwnerReference{} } if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -39283,6 +51761,9 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39310,7 +51791,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39338,7 +51819,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39347,6 +51828,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39369,7 +51853,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39378,6 +51862,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39400,7 +51887,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39410,6 +51897,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-39429,7 +51919,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39449,7 +51939,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39469,7 +51959,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39479,6 +51969,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39498,7 +51991,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39507,6 +52000,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39527,7 +52023,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39544,7 +52040,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39554,6 +52050,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -39570,7 +52069,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39580,6 +52079,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -39616,7 +52118,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39626,6 +52128,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39645,7 +52150,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39655,6 +52160,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39674,7 +52182,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39684,6 +52192,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39703,7 +52214,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39723,7 +52234,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39743,7 +52254,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39763,7 +52274,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39772,6 +52283,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39796,7 +52310,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39805,6 +52319,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39827,7 +52344,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39837,6 +52354,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39856,7 +52376,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39866,6 +52386,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39885,7 +52408,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39894,6 +52417,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39918,7 +52444,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39928,6 +52454,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39947,7 +52476,7 @@ func (m *PodSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39956,6 +52485,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39978,7 +52510,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39999,7 +52531,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40008,6 +52540,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40030,7 +52565,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40039,6 +52574,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40061,7 +52599,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40071,6 +52609,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40090,7 +52631,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40110,7 +52651,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40119,6 +52660,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40143,7 +52687,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40164,7 +52708,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40173,6 +52717,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40195,7 +52742,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40205,6 +52752,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -40225,13 +52775,243 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } b := bool(v != 0) m.EnableServiceLinks = &b + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Overhead[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) + if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40241,6 +53021,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40268,7 +53051,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40296,7 +53079,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40306,6 +53089,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40325,7 +53111,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40334,6 +53120,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40356,7 +53145,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40366,6 +53155,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40385,7 +53177,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40395,6 +53187,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40414,7 +53209,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40424,6 +53219,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40443,7 +53241,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40453,6 +53251,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40472,7 +53273,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40481,11 +53282,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40505,7 +53309,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40514,6 +53318,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40536,7 +53343,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40546,6 +53353,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40565,7 +53375,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40574,6 +53384,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40596,7 +53409,7 @@ func (m 
*PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40606,11 +53419,82 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIPs = append(m.PodIPs, PodIP{}) + if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) + if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40620,6 +53504,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40647,7 +53534,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40675,7 +53562,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40684,6 +53571,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40705,7 +53595,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40714,6 +53604,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40730,6 +53623,9 @@ func (m *PodStatusResult) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40757,7 +53653,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40785,7 +53681,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40794,6 +53690,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40815,7 +53714,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40824,6 +53723,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40840,6 +53742,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40867,7 +53772,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40895,7 +53800,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40904,6 +53809,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40925,7 +53833,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40934,6 +53842,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40951,6 +53862,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40978,7 +53892,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41006,7 +53920,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41015,6 +53929,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } 
@@ -41036,7 +53953,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41045,6 +53962,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41061,6 +53981,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41088,7 +54011,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41116,7 +54039,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41126,6 +54049,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41145,7 +54071,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41155,6 +54081,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41174,7 +54103,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41189,6 +54118,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41216,7 +54148,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41244,7 +54176,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41254,6 +54186,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41269,6 +54204,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41296,7 +54234,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41324,7 +54262,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41333,6 +54271,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41354,7 +54295,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41363,6 +54304,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41384,7 +54328,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41394,6 +54338,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41413,7 +54360,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41423,6 +54370,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41437,6 +54387,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41464,7 +54417,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41492,7 +54445,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41511,7 +54464,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41520,6 +54473,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41536,6 +54492,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41563,7 +54522,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41591,7 +54550,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -41600,6 +54559,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41621,7 +54583,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + m.InitialDelaySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41640,7 +54602,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + m.TimeoutSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41659,7 +54621,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PeriodSeconds |= (int32(b) & 0x7F) << shift + m.PeriodSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41678,7 +54640,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SuccessThreshold |= (int32(b) & 0x7F) << shift + m.SuccessThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41697,7 +54659,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FailureThreshold |= (int32(b) & 0x7F) << shift + m.FailureThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41711,6 +54673,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41738,7 +54703,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41766,7 +54731,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41775,6 +54740,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41797,7 +54765,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41812,6 +54780,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41839,7 +54810,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41867,7 +54838,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41877,6 +54848,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41896,7 +54870,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41906,6 +54880,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41925,7 +54902,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41945,7 +54922,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41955,6 +54932,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41974,7 +54954,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41984,6 +54964,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42003,7 +54986,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42013,6 +54996,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42027,6 +55013,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42054,7 +55043,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42082,7 +55071,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42092,6 +55081,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42111,7 +55103,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42121,6 +55113,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42140,7 +55135,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42150,6 +55145,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42169,7 +55167,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42179,6 +55177,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42198,7 +55199,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42208,6 +55209,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42227,7 +55231,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42237,6 +55241,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42256,7 +55263,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42265,6 +55272,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42289,7 +55299,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42304,6 +55314,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42331,7 +55344,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42359,7 +55372,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42369,6 +55382,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42388,7 +55404,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42398,6 +55414,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42417,7 +55436,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42427,6 +55446,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42446,7 +55468,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42456,6 +55478,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42475,7 +55500,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42485,6 +55510,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42504,7 +55532,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42514,6 +55542,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42533,7 +55564,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42542,6 +55573,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42566,7 +55600,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42581,6 +55615,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42608,7 +55645,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42636,7 +55673,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-42645,6 +55682,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42666,7 +55706,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42676,6 +55716,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42695,7 +55738,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42704,6 +55747,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42721,6 +55767,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42748,7 +55797,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42776,7 +55825,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42785,6 +55834,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42806,7 +55858,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42815,6 +55867,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42836,7 +55891,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42845,6 +55900,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42861,6 +55919,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42888,7 +55949,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42916,7 +55977,7 @@ func (m 
*ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42926,6 +55987,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42945,7 +56009,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42955,6 +56019,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42974,7 +56041,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42983,6 +56050,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43004,7 +56074,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43014,6 +56084,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43033,7 +56106,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43043,6 +56116,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43057,6 +56133,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43084,7 +56163,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43112,7 +56191,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43121,6 +56200,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43142,7 +56224,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43151,6 +56233,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43168,6 +56253,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43195,7 +56283,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43223,7 +56311,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43243,7 +56331,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43252,6 +56340,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43272,7 +56363,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43289,7 +56380,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43299,6 +56390,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -43315,7 +56409,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43325,6 +56419,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -43361,7 +56458,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43370,6 +56467,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43394,7 +56494,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43408,6 +56508,9 @@ func (m *ReplicationControllerSpec) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43435,7 +56538,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43463,7 +56566,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43482,7 +56585,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43501,7 +56604,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -43520,7 +56623,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43539,7 +56642,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43558,7 +56661,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43567,6 +56670,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43584,6 +56690,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43611,7 +56720,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43639,7 +56748,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43649,6 +56758,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43668,7 +56780,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43678,6 +56790,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43697,7 +56812,7 @@ func 
(m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43706,6 +56821,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43722,6 +56840,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43749,7 +56870,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43777,7 +56898,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43786,6 +56907,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43807,7 +56931,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43816,6 +56940,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43837,7 +56964,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43846,6 +56973,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43862,6 +56992,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43889,7 +57022,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43917,7 +57050,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43926,6 +57059,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43947,7 +57083,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43956,6 +57092,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
+ } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43973,6 +57112,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44000,7 +57142,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44028,7 +57170,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44037,6 +57179,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44044,7 +57189,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { m.Hard = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44057,7 +57202,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44074,7 +57219,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44084,6 +57229,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44100,7 +57248,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44109,13 +57257,13 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44151,7 +57299,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44161,6 +57309,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44180,7 +57331,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44189,6 +57340,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -44208,6 +57362,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44235,7 +57392,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44263,7 +57420,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44272,6 +57429,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44279,7 +57439,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { m.Hard = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44292,7 +57452,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44309,7 +57469,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44319,6 +57479,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44335,7 +57498,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44344,13 +57507,13 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44386,7 +57549,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44395,6 +57558,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44402,7 +57568,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { m.Used = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44415,7 +57581,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44432,7 +57598,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44442,6 +57608,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44458,7 +57627,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44467,13 +57636,13 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44504,6 +57673,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44531,7 +57703,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44559,7 +57731,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44568,6 +57740,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44575,7 +57750,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { m.Limits = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44588,7 +57763,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44605,7 +57780,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44615,6 +57790,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44631,7 +57809,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44640,13 +57818,13 @@ func (m *ResourceRequirements) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44682,7 +57860,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44691,6 +57869,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44698,7 +57879,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { m.Requests = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44711,7 +57892,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44728,7 +57909,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44738,6 +57919,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44754,7 +57938,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44763,13 +57947,13 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44800,6 +57984,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44827,7 +58014,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44855,7 +58042,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44865,6 +58052,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44884,7 +58074,7 @@ func (m 
*SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44894,6 +58084,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44913,7 +58106,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44923,6 +58116,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44942,7 +58138,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44952,6 +58148,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44966,6 +58165,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44993,7 +58195,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45021,7 +58223,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45031,6 +58233,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45050,7 +58255,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45060,6 +58265,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45079,7 +58287,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45088,6 +58296,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45112,7 +58323,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45132,7 +58343,7 @@ func (m 
*ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45142,6 +58353,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45161,7 +58375,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45171,6 +58385,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45190,7 +58407,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45200,6 +58417,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45219,7 +58439,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45229,6 +58449,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45248,7 +58471,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45258,6 +58481,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45277,7 +58503,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45292,6 +58518,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45319,7 +58548,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45347,7 +58576,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45357,6 +58586,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45376,7 +58608,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45386,6 +58618,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45405,7 +58640,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45414,6 +58649,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45438,7 +58676,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45458,7 +58696,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45468,6 +58706,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45487,7 +58728,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45497,6 +58738,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45516,7 +58760,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45526,6 +58770,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45545,7 +58792,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45555,6 +58802,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45574,7 +58824,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45584,6 +58834,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -45603,7 +58856,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45618,6 +58871,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45645,7 +58901,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45673,7 +58929,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45682,6 +58938,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45699,6 +58958,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45726,7 +58988,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45754,7 +59016,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45764,6 +59026,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45783,7 +59048,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45793,6 +59058,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45812,7 +59080,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45822,6 +59090,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45836,6 +59107,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45863,7 +59137,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45891,7 +59165,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45900,6 +59174,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45921,7 +59198,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45930,6 +59207,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45950,7 +59230,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45967,7 +59247,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45977,6 +59257,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -45993,7 +59276,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift + mapbyteLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46003,6 +59286,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } if postbytesIndex > l { return io.ErrUnexpectedEOF } @@ -46040,7 +59326,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46050,6 +59336,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46069,7 +59358,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46078,6 +59367,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46098,7 +59390,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46115,7 +59407,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46125,6 +59417,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if 
postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -46141,7 +59436,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46151,6 +59446,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -46182,6 +59480,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46209,7 +59510,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46237,7 +59538,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46246,6 +59547,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46267,7 +59571,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46283,6 +59587,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46310,7 +59617,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46338,7 +59645,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46347,6 +59654,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46368,7 +59678,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46378,6 +59688,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46397,7 +59710,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46413,6 +59726,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -46440,7 +59756,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46468,7 +59784,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46477,6 +59793,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46498,7 +59817,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46507,6 +59826,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46524,6 +59846,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46551,7 +59876,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46579,7 +59904,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46588,6 +59913,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46609,7 +59937,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46618,6 +59946,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46640,7 +59971,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46656,6 +59987,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46683,7 +60017,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46711,7 +60045,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46721,6 +60055,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -46740,7 +60077,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46750,6 +60087,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46764,6 +60104,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46791,7 +60134,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46819,7 +60162,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46829,6 +60172,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46848,7 +60194,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46857,6 +60203,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46879,7 +60228,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -46899,7 +60248,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46915,6 +60264,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46942,7 +60294,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46970,7 +60322,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46979,6 +60331,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47003,7 +60358,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47024,7 +60379,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -47033,6 +60388,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47057,7 +60415,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47077,7 +60435,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47098,7 +60456,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47119,7 +60477,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47140,7 +60498,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47160,7 +60518,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47170,12 +60528,51 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := ProcMountType(dAtA[iNdEx:postIndex]) m.ProcMount = &s iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47185,6 +60582,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47212,7 +60612,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47240,7 +60640,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47249,6 +60649,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF 
} @@ -47265,6 +60668,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47292,7 +60698,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47320,7 +60726,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47329,6 +60735,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47350,7 +60759,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47359,6 +60768,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47380,7 +60792,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47389,6 +60801,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47405,6 +60820,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47432,7 +60850,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47460,7 +60878,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47469,6 +60887,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47490,7 +60911,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47499,6 +60920,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47521,7 +60945,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47530,6 +60954,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -47552,7 +60979,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47568,6 +60995,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47595,7 +61025,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47623,7 +61053,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47632,6 +61062,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47653,7 +61086,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47662,6 +61095,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47679,6 +61115,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47706,7 +61145,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47734,7 +61173,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47744,6 +61183,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47763,7 +61205,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47783,7 +61225,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47793,6 +61235,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47807,6 +61252,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47834,7 +61282,7 @@ 
func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47862,7 +61310,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47871,6 +61319,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47892,7 +61343,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47901,6 +61352,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47918,6 +61372,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47945,7 +61402,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47973,7 +61430,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47983,6 +61440,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48002,7 +61462,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48012,6 +61472,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48031,7 +61494,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48050,7 +61513,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48059,6 +61522,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48080,7 +61546,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodePort |= (int32(b) & 0x7F) << shift + m.NodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48094,6 +61560,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48121,7 
+61590,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48149,7 +61618,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48159,6 +61628,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48173,6 +61645,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48200,7 +61675,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48228,7 +61703,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48237,6 +61712,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48259,7 +61737,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48268,6 +61746,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48288,7 +61769,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48305,7 +61786,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48315,6 +61796,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -48331,7 +61815,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48341,6 +61825,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -48377,7 +61864,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48387,6 +61874,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48406,7 +61896,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48416,6 +61906,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48435,7 +61928,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48445,6 +61938,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48464,7 +61960,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48474,6 +61970,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48493,7 +61992,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48503,6 +62002,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48522,7 +62024,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48532,6 +62034,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48551,7 +62056,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48561,6 +62066,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48580,7 +62088,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48590,6 +62098,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48609,7 +62120,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.HealthCheckNodePort |= (int32(b) & 0x7F) 
<< shift + m.HealthCheckNodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48628,7 +62139,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48648,7 +62159,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48657,6 +62168,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48667,6 +62181,39 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := IPFamily(dAtA[iNdEx:postIndex]) + m.IPFamily = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48676,6 +62223,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48703,7 +62253,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48731,7 +62281,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48740,6 +62290,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48756,6 +62309,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48783,7 +62339,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48811,7 +62367,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48820,6 +62376,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48839,6 +62398,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated 
} + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48866,7 +62428,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48894,7 +62456,39 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48904,35 +62498,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } @@ -48952,7 +62520,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48962,6 +62530,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48981,7 +62552,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49001,7 +62572,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49010,6 +62581,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49029,6 +62603,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49056,7 +62633,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49084,7 +62661,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49094,6 +62671,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49113,7 +62693,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49123,6 +62703,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49142,7 +62725,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49152,6 +62735,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49171,7 +62757,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49191,7 +62777,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49200,6 +62786,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49219,6 +62808,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49246,7 +62838,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49274,7 +62866,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49284,6 +62876,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49303,7 +62898,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49313,6 +62908,9 @@ func (m *Sysctl) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49327,6 +62925,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49354,7 +62955,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49382,7 +62983,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49391,6 +62992,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49412,7 +63016,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49422,6 +63026,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49436,6 +63043,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49463,7 +63073,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49491,7 +63101,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49501,6 +63111,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49520,7 +63133,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49530,6 +63143,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49549,7 +63165,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49559,6 +63175,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49578,7 +63197,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } @@ -49587,11 +63206,14 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TimeAdded == nil { - m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.TimeAdded = &v1.Time{} } if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -49606,6 +63228,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49633,7 +63258,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49661,7 +63286,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49671,6 +63296,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49690,7 +63318,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49700,6 +63328,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49719,7 +63350,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49729,6 +63360,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49748,7 +63382,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49758,6 +63392,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49777,7 +63414,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -49792,6 +63429,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49819,7 +63459,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49847,7 +63487,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49857,6 +63497,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49876,7 +63519,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49886,6 +63529,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49900,6 +63546,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49927,7 +63576,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49955,7 +63604,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49964,6 +63613,9 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49981,6 +63633,181 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) + } + m.MaxSkew = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxSkew |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50008,7 +63835,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50036,7 +63863,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50046,6 +63873,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50066,7 +63896,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50076,6 +63906,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50095,7 +63928,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -50105,6 +63938,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50119,6 +63955,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50146,7 +63985,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50174,7 +64013,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50184,6 +64023,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50203,7 +64045,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50212,6 +64054,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50228,6 +64073,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50255,7 +64103,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50283,7 +64131,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50293,6 +64141,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50312,7 +64163,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50322,6 +64173,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50336,6 +64190,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50363,7 +64220,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50391,7 +64248,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50401,6 +64258,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50420,7 +64280,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50440,7 +64300,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50450,6 +64310,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50469,7 +64332,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50479,6 +64342,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50498,7 +64364,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50508,6 +64374,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50528,7 +64397,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50538,6 +64407,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50552,6 +64424,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50579,7 +64454,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50607,7 +64482,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50616,6 +64491,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50635,6 +64513,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -50662,7 +64543,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50690,7 +64571,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50699,6 +64580,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50723,7 +64607,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50732,6 +64616,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50756,7 +64643,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50765,6 +64652,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50789,7 +64679,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50798,6 +64688,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50817,6 +64710,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50844,7 +64740,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50872,7 +64768,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50881,6 +64777,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50905,7 +64804,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50914,6 +64813,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50938,7 +64840,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -50947,6 +64849,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50971,7 +64876,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50980,6 +64885,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51004,7 +64912,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51013,6 +64921,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51037,7 +64948,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51046,6 +64957,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51070,7 +64984,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51079,6 +64993,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51103,7 +65020,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51112,6 +65029,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51136,7 +65056,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51145,6 +65065,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51169,7 +65092,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51178,6 +65101,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51202,7 +65128,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51211,6 +65137,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51235,7 +65164,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51244,6 +65173,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51268,7 +65200,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51277,6 +65209,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51301,7 +65236,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51310,6 +65245,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51334,7 +65272,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51343,6 +65281,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51367,7 +65308,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51376,6 +65317,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51400,7 +65344,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51409,6 +65353,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51433,7 +65380,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51442,6 +65389,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51466,7 +65416,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51475,6 +65425,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51499,7 +65452,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51508,6 +65461,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51532,7 +65488,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51541,6 +65497,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51565,7 +65524,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51574,6 +65533,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51598,7 +65560,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51607,6 +65569,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51631,7 +65596,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51640,6 +65605,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51664,7 +65632,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51673,6 +65641,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51697,7 +65668,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51706,6 +65677,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51730,7 +65704,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51739,6 +65713,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51763,7 +65740,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51772,6 +65749,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51791,6 +65771,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51818,7 +65801,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51846,7 +65829,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51856,6 +65839,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51875,7 +65861,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51885,6 +65871,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51904,7 +65893,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51914,6 +65903,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51933,7 +65925,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51943,6 +65935,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51957,6 +65952,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51984,7 +65982,7 
@@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52012,7 +66010,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -52031,7 +66029,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -52040,6 +66038,9 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52056,6 +66057,161 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpecName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpec = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUserName", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RunAsUserName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -52122,10 +66278,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -52154,6 +66313,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -52172,820 +66334,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 12934 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x64, 0x57, - 0x56, 0x18, 0xbe, 0xaf, 0xbb, 0x25, 0x75, 0x1f, 0x7d, 0xdf, 0x99, 0xb1, 0x35, 0xf2, 0xcc, 0xf4, - 0xf8, 0x79, 0x77, 0x3c, 0x5e, 0xdb, 0xd2, 0x7a, 0x6c, 0xaf, 0xcd, 0xda, 0x6b, 0x90, 0xd4, 0xd2, - 0x4c, 0x7b, 0x46, 0x9a, 0xf6, 0x6d, 0xcd, 0x78, 0xd7, 0x78, 0x17, 0x3f, 0x75, 0x5f, 0x49, 0xcf, - 0x6a, 0xbd, 0xd7, 0x7e, 0xef, 0xb5, 0x66, 0xe4, 0x1f, 0xd4, 0x2f, 0x59, 0x02, 0x61, 0x03, 0x95, - 0xda, 0x4a, 0xb6, 0xf2, 0x01, 0x14, 0xa9, 0x22, 0xa4, 0x80, 0x90, 0xa4, 0x42, 0x20, 0x40, 0x58, - 0x48, 0x08, 0x24, 0x55, 0x24, 0x7f, 0x6c, 0x48, 0xaa, 0x52, 0x4b, 0x15, 0x15, 0x05, 0x44, 0x2a, - 0x29, 0xfe, 0x08, 0xa4, 0x42, 0xfe, 0x08, 0x0a, 0x15, 0x52, 0xf7, 0xf3, 0xdd, 0xfb, 0xfa, 0xbd, - 0xee, 0xd6, 0x58, 0x23, 0x9b, 0xad, 0xfd, 0xaf, 0xfb, 0x9e, 0x73, 0xcf, 0xbd, 0xef, 0x7e, 0x9e, - 0x73, 0xee, 0xf9, 0x80, 0x57, 0x76, 0x5e, 0x0e, 0xe7, 0x5c, 0x7f, 0x7e, 0xa7, 0xb3, 0x41, 0x02, - 0x8f, 0x44, 0x24, 0x9c, 0xdf, 0x23, 0x5e, 0xd3, 0x0f, 0xe6, 0x05, 0xc0, 0x69, 0xbb, 0xf3, 0x0d, - 0x3f, 0x20, 0xf3, 0x7b, 0xcf, 0xcd, 0x6f, 0x11, 0x8f, 0x04, 0x4e, 0x44, 0x9a, 0x73, 0xed, 0xc0, - 0x8f, 0x7c, 0x84, 0x38, 0xce, 0x9c, 0xd3, 0x76, 0xe7, 0x28, 0xce, 0xdc, 0xde, 0x73, 0xb3, 0xcf, - 0x6e, 0xb9, 0xd1, 0x76, 0x67, 0x63, 0xae, 0xe1, 0xef, 0xce, 0x6f, 0xf9, 0x5b, 0xfe, 0x3c, 0x43, - 0xdd, 0xe8, 0x6c, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbe, 0x10, 0x37, 0xb3, 0xeb, - 0x34, 0xb6, 0x5d, 0x8f, 0x04, 0xfb, 0xf3, 0xed, 0x9d, 0x2d, 0xd6, 0x6e, 0x40, 0x42, 0xbf, 0x13, - 0x34, 0x48, 0xb2, 0xe1, 0x9e, 0xb5, 0xc2, 0xf9, 0x5d, 0x12, 0x39, 0x29, 0xdd, 0x9d, 0x9d, 0xcf, - 0xaa, 0x15, 0x74, 0xbc, 0xc8, 0xdd, 0xed, 0x6e, 0xe6, 0xd3, 0xfd, 0x2a, 0x84, 0x8d, 0x6d, 0xb2, - 0xeb, 0x74, 0xd5, 0x7b, 0x3e, 
-	[... remaining hex bytes of the 12934-byte gzipped FileDescriptorProto elided; this hunk removes the entire fileDescriptorGenerated array along with its init() registration ...]
0x59, 0x38, 0x73, 0x9e, 0x67, 0x09, 0xe3, 0x8c, 0x37, 0x2f, 0xc3, 0x0a, 0x8a, 0x56, 0xd9, - 0xb9, 0xcc, 0xc5, 0xc1, 0x99, 0xd9, 0xec, 0x68, 0x0f, 0xba, 0xd8, 0xc8, 0xf9, 0x25, 0xf5, 0x17, - 0xc7, 0x14, 0xe8, 0xbd, 0x11, 0x6e, 0x3b, 0x01, 0xa9, 0x05, 0x7e, 0x83, 0x84, 0x5a, 0x54, 0xe6, - 0xc7, 0x78, 0x24, 0x47, 0x7a, 0x6f, 0xd4, 0xd3, 0x10, 0x70, 0x7a, 0x3d, 0xd4, 0xd4, 0x92, 0x63, - 0x53, 0x36, 0x34, 0x9c, 0xb9, 0xd0, 0xc3, 0xe0, 0x28, 0xc1, 0xb3, 0xc6, 0x6b, 0xd1, 0x28, 0x0e, - 0x71, 0x82, 0x26, 0xfa, 0x0e, 0x98, 0x12, 0x81, 0x8f, 0xe2, 0x71, 0xbf, 0x18, 0x5b, 0x32, 0xe2, - 0x04, 0x0c, 0x77, 0x61, 0xf3, 0x58, 0xd4, 0xce, 0x46, 0x8b, 0x88, 0x45, 0x78, 0xcb, 0xf5, 0x76, - 0xc2, 0x99, 0x4b, 0xec, 0xab, 0x45, 0x2c, 0xea, 0x24, 0x14, 0xa7, 0xd4, 0x98, 0xfd, 0x76, 0x98, - 0xee, 0xba, 0xb9, 0x8e, 0x15, 0xbf, 0xfd, 0x4f, 0x86, 0xa0, 0xa4, 0x94, 0xf2, 0x68, 0xde, 0x7c, - 0x6b, 0x39, 0x9f, 0x7c, 0x6b, 0x29, 0x52, 0xd9, 0x40, 0x7f, 0x5e, 0x59, 0x37, 0x0c, 0xf5, 0x72, - 0xd9, 0xd9, 0xd2, 0x74, 0xee, 0xbe, 0xaf, 0xd3, 0x9f, 0xa6, 0x63, 0xc9, 0x0f, 0xfc, 0x68, 0x53, - 0xe8, 0xa9, 0xb6, 0x19, 0x30, 0x59, 0x31, 0x7a, 0x82, 0x0a, 0x48, 0xcd, 0x6a, 0x2d, 0x99, 0xbd, - 0xb3, 0x46, 0x0b, 0x31, 0x87, 0x31, 0x41, 0x92, 0xb2, 0x59, 0x4c, 0x90, 0x1c, 0x79, 0x40, 0x41, - 0x52, 0x12, 0xc0, 0x31, 0x2d, 0xd4, 0x82, 0xe9, 0x86, 0x99, 0x78, 0x55, 0x39, 0xfa, 0x3d, 0xd1, - 0x37, 0x05, 0x6a, 0x47, 0xcb, 0x72, 0xb7, 0x94, 0xa4, 0x82, 0xbb, 0x09, 0xa3, 0x57, 0xa0, 0xf8, - 0x9e, 0x1f, 0xb2, 0x45, 0x29, 0x78, 0x0d, 0xe9, 0x10, 0x55, 0x7c, 0xe3, 0x76, 0x9d, 0x95, 0x1f, - 0x1d, 0x94, 0x47, 0x6b, 0x7e, 0x53, 0xfe, 0xc5, 0xaa, 0x02, 0xba, 0x0f, 0xe7, 0x8c, 0x13, 0x5a, - 0x75, 0x17, 0x06, 0xef, 0xee, 0x45, 0xd1, 0xdc, 0xb9, 0x6a, 0x1a, 0x25, 0x9c, 0xde, 0x00, 0x3d, - 0xf6, 0x3c, 0x5f, 0x24, 0x2d, 0x96, 0xfc, 0x0c, 0x63, 0x5b, 0x4a, 0xba, 0x3b, 0x7c, 0x02, 0x01, - 0x77, 0xd7, 0xb1, 0x7f, 0x99, 0xbf, 0x61, 0x08, 0x4d, 0x27, 0x09, 0x3b, 0xad, 0xd3, 0xc8, 0x89, - 0xb5, 0x6c, 0x28, 0x61, 0x1f, 0xf8, 0x9d, 0xec, 0xd7, 0x2d, 0xf6, 0x4e, 0xb6, 0x4e, 0x76, 0xdb, - 0x2d, 0x2a, 0x6f, 0x3f, 0xfc, 0x8e, 0xbf, 0x01, 0xc5, 0x48, 0xb4, 0xd6, 0x2b, 0x8d, 0x97, 0xd6, - 0x29, 0xf6, 0x56, 0xa8, 0x38, 0x1d, 0x59, 0x8a, 0x15, 0x19, 0xfb, 0x9f, 0xf2, 0x19, 0x90, 0x90, - 0x53, 0x50, 0x88, 0x55, 0x4c, 0x85, 0x58, 0xb9, 0xcf, 0x17, 0x64, 0x28, 0xc6, 0xfe, 0x89, 0xd9, - 0x6f, 0x26, 0x54, 0x7e, 0xd4, 0x1f, 0x68, 0xed, 0x1f, 0xb6, 0xe0, 0x6c, 0x9a, 0x45, 0x13, 0xe5, - 0x4e, 0xb9, 0x48, 0xab, 0x1e, 0xac, 0xd5, 0x08, 0xde, 0x15, 0xe5, 0x58, 0x61, 0x0c, 0x9c, 0x21, - 0xe3, 0x78, 0x11, 0xe3, 0x6e, 0xc3, 0x78, 0x2d, 0x20, 0xda, 0x1d, 0xf0, 0x1a, 0xf7, 0xac, 0xe3, - 0xfd, 0x79, 0xe6, 0xd8, 0x5e, 0x75, 0xf6, 0xcf, 0xe4, 0xe0, 0x2c, 0x7f, 0x71, 0x5a, 0xd8, 0xf3, - 0xdd, 0x66, 0xcd, 0x6f, 0x8a, 0xec, 0x26, 0x6f, 0xc1, 0x58, 0x5b, 0xd3, 0x43, 0xf4, 0x8a, 0x59, - 0xa5, 0xeb, 0x2b, 0x62, 0x79, 0x50, 0x2f, 0xc5, 0x06, 0x2d, 0xd4, 0x84, 0x31, 0xb2, 0xe7, 0x36, - 0xd4, 0xb3, 0x45, 0xee, 0xd8, 0x77, 0x83, 0x6a, 0x65, 0x59, 0xa3, 0x83, 0x0d, 0xaa, 0x0f, 0x21, - 0xe1, 0x9d, 0xfd, 0x23, 0x16, 0x3c, 0x9a, 0x11, 0xe1, 0x8a, 0x36, 0x77, 0x8f, 0xbd, 0xed, 0x89, - 0xdc, 0x59, 0xaa, 0x39, 0xfe, 0xe2, 0x87, 0x05, 0x14, 0x7d, 0x0e, 0x80, 0xbf, 0xd8, 0x51, 0xf1, - 0xa8, 0x5f, 0x28, 0x20, 0x23, 0x8a, 0x89, 0x16, 0x7d, 0x42, 0xd6, 0xc7, 0x1a, 0x2d, 0xfb, 0x27, - 0xf3, 0x30, 0xc4, 0x5e, 0x88, 0xd0, 0x0a, 0x8c, 0x6c, 0xf3, 0x98, 0xcf, 0x83, 0x84, 0x97, 0x8e, - 0xe5, 0x4c, 0x5e, 0x80, 0x65, 0x65, 0xb4, 0x0a, 0x67, 0x78, 0xcc, 0xec, 0x56, 0x85, 0xb4, 0x9c, - 0x7d, 0xa9, 0xae, 0xe0, 0xf9, 
0xa6, 0x54, 0x24, 0x8d, 0x6a, 0x37, 0x0a, 0x4e, 0xab, 0x87, 0x5e, - 0x83, 0x09, 0xca, 0xdf, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xd1, 0xb2, 0x15, 0x43, 0xb9, 0x6e, 0x40, - 0x71, 0x02, 0x9b, 0x0a, 0x5e, 0xed, 0x2e, 0xc5, 0xcc, 0x50, 0x2c, 0x78, 0x99, 0xca, 0x18, 0x13, - 0x97, 0x99, 0x32, 0x75, 0x98, 0xe1, 0xd6, 0xfa, 0x76, 0x40, 0xc2, 0x6d, 0xbf, 0xd5, 0x14, 0xe9, - 0xca, 0x63, 0x53, 0xa6, 0x04, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xb7, 0xd5, 0x09, 0x48, - 0x4c, 0x65, 0xd8, 0xa4, 0xb2, 0x92, 0x80, 0xe3, 0xae, 0x1a, 0x74, 0x1d, 0x9d, 0x13, 0xf9, 0xc3, - 0xa5, 0x7f, 0xbf, 0xb2, 0x4f, 0x1b, 0x91, 0x9e, 0x4e, 0x3d, 0x02, 0xdc, 0x08, 0x0b, 0x1e, 0x95, - 0x81, 0x5c, 0xd3, 0x27, 0x0a, 0x1f, 0x27, 0x49, 0xe5, 0x41, 0xb2, 0x58, 0xff, 0x40, 0x0e, 0xce, - 0xa4, 0xd8, 0xc1, 0xf2, 0xa3, 0x6a, 0xcb, 0x0d, 0x23, 0x95, 0x53, 0x47, 0x3b, 0xaa, 0x78, 0x39, - 0x56, 0x18, 0x74, 0x3f, 0xf0, 0xc3, 0x30, 0x79, 0x00, 0x0a, 0x3b, 0x33, 0x01, 0x3d, 0x66, 0x76, - 0x9a, 0xcb, 0x50, 0xe8, 0x84, 0x44, 0x86, 0xa6, 0x52, 0xe7, 0x37, 0xd3, 0x30, 0x33, 0x08, 0x65, - 0x4d, 0xb7, 0x94, 0x72, 0x57, 0x63, 0x4d, 0xb9, 0xc6, 0x96, 0xc3, 0x68, 0xe7, 0x22, 0xe2, 0x39, - 0x5e, 0x24, 0x18, 0xd8, 0x38, 0xa0, 0x0a, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0x25, 0x0f, 0xe7, 0x33, - 0x2d, 0xe3, 0x69, 0xd7, 0x77, 0x7d, 0xcf, 0x8d, 0x7c, 0xf5, 0x4a, 0xc9, 0x83, 0xa8, 0x90, 0xf6, - 0xf6, 0xaa, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x91, 0x19, 0xef, 0x93, 0xd9, 0x85, 0x16, 0x2b, 0x46, - 0xd2, 0xfb, 0x41, 0x33, 0xb7, 0x3d, 0x01, 0x85, 0xb6, 0xef, 0xb7, 0x92, 0x87, 0x16, 0xed, 0xae, - 0xef, 0xb7, 0x30, 0x03, 0xa2, 0x4f, 0x88, 0xf1, 0x4a, 0x3c, 0xcb, 0x61, 0xa7, 0xe9, 0x87, 0xda, - 0xa0, 0x3d, 0x05, 0x23, 0x3b, 0x64, 0x3f, 0x70, 0xbd, 0xad, 0xe4, 0x73, 0xed, 0x4d, 0x5e, 0x8c, - 0x25, 0xdc, 0xcc, 0x35, 0x31, 0x72, 0xd2, 0x29, 0xd7, 0x8a, 0x7d, 0xaf, 0xc0, 0x1f, 0xcc, 0xc3, - 0x24, 0x5e, 0xac, 0x7c, 0x6b, 0x22, 0xee, 0x74, 0x4f, 0xc4, 0x49, 0xa7, 0x5c, 0xeb, 0x3f, 0x1b, - 0xbf, 0x60, 0xc1, 0x24, 0x8b, 0xc7, 0x2c, 0x42, 0x77, 0xb8, 0xbe, 0x77, 0x0a, 0x2c, 0xde, 0x13, - 0x30, 0x14, 0xd0, 0x46, 0x93, 0x69, 0x85, 0x58, 0x4f, 0x30, 0x87, 0xa1, 0x0b, 0x50, 0x60, 0x5d, - 0xa0, 0x93, 0x37, 0xc6, 0x33, 0x32, 0x54, 0x9c, 0xc8, 0xc1, 0xac, 0x94, 0xf9, 0xa3, 0x63, 0xd2, - 0x6e, 0xb9, 0xbc, 0xd3, 0xf1, 0x13, 0xc8, 0x47, 0xc3, 0x1f, 0x3d, 0xb5, 0x6b, 0x1f, 0xcc, 0x1f, - 0x3d, 0x9d, 0x64, 0x6f, 0xf1, 0xe9, 0x0f, 0x73, 0x70, 0x29, 0xb5, 0xde, 0xc0, 0xfe, 0xe8, 0xbd, - 0x6b, 0x9f, 0x8c, 0xd5, 0x4d, 0xba, 0x31, 0x4c, 0xfe, 0x14, 0x8d, 0x61, 0x0a, 0x83, 0x72, 0x98, - 0x43, 0x03, 0xb8, 0x89, 0xa7, 0x0e, 0xd9, 0x47, 0xc4, 0x4d, 0x3c, 0xb5, 0x6f, 0x19, 0xe2, 0xdf, - 0x9f, 0xe5, 0x32, 0xbe, 0x85, 0x09, 0x82, 0x57, 0xe9, 0x39, 0xc3, 0x80, 0xa1, 0xe0, 0x98, 0xc7, - 0xf8, 0x19, 0xc3, 0xcb, 0xb0, 0x82, 0x22, 0x57, 0x73, 0xb8, 0xce, 0x65, 0x67, 0xd9, 0xcc, 0x6c, - 0x6a, 0xce, 0x7c, 0xb1, 0x52, 0x43, 0x90, 0xe2, 0x7c, 0xbd, 0xaa, 0x09, 0xef, 0xf9, 0xc1, 0x85, - 0xf7, 0xb1, 0x74, 0xc1, 0x1d, 0x2d, 0xc0, 0xe4, 0xae, 0xeb, 0xd1, 0x63, 0x73, 0xdf, 0x64, 0x59, - 0x55, 0xfc, 0x91, 0x55, 0x13, 0x8c, 0x93, 0xf8, 0xb3, 0xaf, 0xc0, 0xf8, 0x83, 0xab, 0x2d, 0xbf, - 0x91, 0x87, 0xc7, 0x7a, 0x6c, 0x7b, 0x7e, 0xd6, 0x1b, 0x73, 0xa0, 0x9d, 0xf5, 0x5d, 0xf3, 0x50, - 0x83, 0xb3, 0x9b, 0x9d, 0x56, 0x6b, 0x9f, 0xd9, 0x9b, 0x92, 0xa6, 0xc4, 0x10, 0x3c, 0xe5, 0x05, - 0x99, 0x03, 0x63, 0x25, 0x05, 0x07, 0xa7, 0xd6, 0x44, 0xaf, 0x03, 0xf2, 0x45, 0x8a, 0xdf, 0xeb, - 0xc4, 0x13, 0xef, 0x00, 0x6c, 0xe0, 0xf3, 0xf1, 0x66, 0xbc, 0xdd, 0x85, 0x81, 0x53, 0x6a, 0x51, - 0xe1, 0x80, 0xde, 0x4a, 0xfb, 0xaa, 0x5b, 0x09, 0xe1, 
0x00, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xeb, - 0x30, 0xed, 0xec, 0x39, 0x2e, 0x8f, 0xcb, 0x27, 0x09, 0x70, 0xe9, 0x40, 0x29, 0xcb, 0x16, 0x92, - 0x08, 0xb8, 0xbb, 0x4e, 0xc2, 0x25, 0x7b, 0x38, 0xdb, 0x25, 0xbb, 0xf7, 0xb9, 0xd8, 0x4f, 0xf7, - 0x6b, 0xff, 0x27, 0x8b, 0x5e, 0x5f, 0x29, 0x69, 0xfa, 0xe9, 0x38, 0x28, 0x1d, 0xa6, 0xe6, 0x1d, - 0x7d, 0x4e, 0xb3, 0x28, 0x89, 0x81, 0xd8, 0xc4, 0xe5, 0x0b, 0x22, 0x8c, 0x9d, 0x72, 0x0c, 0x16, - 0x5f, 0x44, 0x57, 0x50, 0x18, 0xe8, 0xf3, 0x30, 0xd2, 0x74, 0xf7, 0xdc, 0xd0, 0x0f, 0xc4, 0x66, - 0x39, 0xa6, 0x6b, 0x43, 0x7c, 0x0e, 0x56, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x98, 0x83, 0x71, - 0xd9, 0xe2, 0x1b, 0x1d, 0x3f, 0x72, 0x4e, 0xe1, 0x5a, 0xbe, 0x6e, 0x5c, 0xcb, 0x9f, 0xe8, 0x15, - 0x62, 0x82, 0x75, 0x29, 0xf3, 0x3a, 0xbe, 0x9d, 0xb8, 0x8e, 0x9f, 0xec, 0x4f, 0xaa, 0xf7, 0x35, - 0xfc, 0xcf, 0x2c, 0x98, 0x36, 0xf0, 0x4f, 0xe1, 0x36, 0x58, 0x31, 0x6f, 0x83, 0xc7, 0xfb, 0x7e, - 0x43, 0xc6, 0x2d, 0xf0, 0x7d, 0xf9, 0x44, 0xdf, 0xd9, 0xe9, 0xff, 0x1e, 0x14, 0xb6, 0x9d, 0xa0, - 0xd9, 0x2b, 0x94, 0x6d, 0x57, 0xa5, 0xb9, 0x1b, 0x4e, 0xd0, 0xe4, 0x67, 0xf8, 0x33, 0x2a, 0x4f, - 0xa6, 0x13, 0x34, 0xfb, 0xfa, 0xa0, 0xb1, 0xa6, 0xd0, 0xcb, 0x30, 0x1c, 0x36, 0xfc, 0xb6, 0xb2, - 0x10, 0xbd, 0xcc, 0x73, 0x68, 0xd2, 0x92, 0xa3, 0x83, 0x32, 0x32, 0x9b, 0xa3, 0xc5, 0x58, 0xe0, - 0xa3, 0xb7, 0x60, 0x9c, 0xfd, 0x52, 0x96, 0x12, 0xf9, 0xec, 0x04, 0x0a, 0x75, 0x1d, 0x91, 0x1b, - 0xdc, 0x18, 0x45, 0xd8, 0x24, 0x35, 0xbb, 0x05, 0x25, 0xf5, 0x59, 0x0f, 0xd5, 0x77, 0xe8, 0xdf, - 0xe7, 0xe1, 0x4c, 0xca, 0x9a, 0x43, 0xa1, 0x31, 0x13, 0xcf, 0x0d, 0xb8, 0x54, 0x3f, 0xe0, 0x5c, - 0x84, 0x4c, 0x1a, 0x6a, 0x8a, 0xb5, 0x35, 0x70, 0xa3, 0x77, 0x42, 0x92, 0x6c, 0x94, 0x16, 0xf5, - 0x6f, 0x94, 0x36, 0x76, 0x6a, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0xc7, 0x79, - 0x38, 0x9b, 0x16, 0xf5, 0x06, 0x7d, 0x77, 0x22, 0x99, 0xce, 0x0b, 0x83, 0xc6, 0xcb, 0xe1, 0x19, - 0x76, 0x44, 0x2e, 0xec, 0x39, 0x33, 0xbd, 0x4e, 0xdf, 0x61, 0x16, 0x6d, 0x32, 0x87, 0xd3, 0x80, - 0x27, 0x41, 0x92, 0xc7, 0xc7, 0xa7, 0x07, 0xee, 0x80, 0xc8, 0x9e, 0x14, 0x26, 0x1c, 0x4e, 0x65, - 0x71, 0x7f, 0x87, 0x53, 0xd9, 0xf2, 0xac, 0x0b, 0xa3, 0xda, 0xd7, 0x3c, 0xd4, 0x19, 0xdf, 0xa1, - 0xb7, 0x95, 0xd6, 0xef, 0x87, 0x3a, 0xeb, 0x3f, 0x62, 0x41, 0xc2, 0x1c, 0x53, 0xa9, 0xc5, 0xac, - 0x4c, 0xb5, 0xd8, 0x65, 0x28, 0x04, 0x7e, 0x8b, 0x24, 0x73, 0xd7, 0x60, 0xbf, 0x45, 0x30, 0x83, - 0x50, 0x8c, 0x28, 0x56, 0x76, 0x8c, 0xe9, 0x82, 0x9c, 0x10, 0xd1, 0x9e, 0x80, 0xa1, 0x16, 0xd9, - 0x23, 0xad, 0x64, 0x60, 0xf8, 0x5b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x0b, 0x05, 0xb8, 0xd8, 0xd3, - 0x65, 0x9b, 0x8a, 0x43, 0x5b, 0x4e, 0x44, 0xee, 0x39, 0xfb, 0xc9, 0x08, 0xce, 0xd7, 0x79, 0x31, - 0x96, 0x70, 0x66, 0xa1, 0xce, 0x23, 0x36, 0x26, 0x94, 0x88, 0x22, 0x50, 0xa3, 0x80, 0x9a, 0x4a, - 0xa9, 0xfc, 0x49, 0x28, 0xa5, 0xae, 0x01, 0x84, 0x61, 0x8b, 0xdb, 0x17, 0x34, 0x85, 0xe9, 0x7b, - 0x1c, 0xd9, 0xb3, 0x7e, 0x4b, 0x40, 0xb0, 0x86, 0x85, 0x2a, 0x30, 0xd5, 0x0e, 0xfc, 0x88, 0xeb, - 0x64, 0x2b, 0xdc, 0x30, 0x69, 0xc8, 0xf4, 0x96, 0xad, 0x25, 0xe0, 0xb8, 0xab, 0x06, 0x7a, 0x11, - 0x46, 0x85, 0x07, 0x6d, 0xcd, 0xf7, 0x5b, 0x42, 0x0d, 0xa4, 0xcc, 0x5c, 0xea, 0x31, 0x08, 0xeb, - 0x78, 0x5a, 0x35, 0xa6, 0xe8, 0x1d, 0x49, 0xad, 0xc6, 0x95, 0xbd, 0x1a, 0x5e, 0x22, 0x02, 0x56, - 0x71, 0xa0, 0x08, 0x58, 0xb1, 0x62, 0xac, 0x34, 0xf0, 0xdb, 0x16, 0xf4, 0x55, 0x25, 0xfd, 0x6c, - 0x01, 0xce, 0x88, 0x85, 0xf3, 0xb0, 0x97, 0xcb, 0x9d, 0xee, 0xe5, 0x72, 0x12, 0xaa, 0xb3, 0x6f, - 0xad, 0x99, 0xd3, 0x5e, 0x33, 0x3f, 0x64, 0x81, 0xc9, 0x5e, 0xa1, 0xff, 0x2f, 
0x33, 0x04, 0xfe, - 0x8b, 0x99, 0xec, 0x5a, 0x53, 0x5e, 0x20, 0x1f, 0x30, 0x18, 0xbe, 0xfd, 0x1f, 0x2d, 0x78, 0xbc, - 0x2f, 0x45, 0xb4, 0x0c, 0x25, 0xc6, 0x03, 0x6a, 0xd2, 0xd9, 0x93, 0xca, 0x70, 0x51, 0x02, 0x32, - 0x58, 0xd2, 0xb8, 0x26, 0x5a, 0xee, 0xca, 0x35, 0xf0, 0x54, 0x4a, 0xae, 0x81, 0x73, 0xc6, 0xf0, - 0x3c, 0x60, 0xb2, 0x81, 0x5f, 0xce, 0xc3, 0x30, 0x5f, 0xf1, 0xa7, 0x20, 0x86, 0xad, 0x08, 0xbd, - 0x6d, 0x8f, 0x18, 0x58, 0xbc, 0x2f, 0x73, 0x15, 0x27, 0x72, 0x38, 0x9b, 0xa0, 0x6e, 0xab, 0x58, - 0xc3, 0x8b, 0xe6, 0x8c, 0xfb, 0x6c, 0x36, 0xa1, 0x98, 0x04, 0x4e, 0x43, 0xbb, 0xdd, 0xbe, 0x08, - 0x10, 0xb2, 0x3c, 0xfd, 0x94, 0x86, 0x88, 0xa6, 0xf6, 0xc9, 0x1e, 0xad, 0xd7, 0x15, 0x32, 0xef, - 0x43, 0xbc, 0xd3, 0x15, 0x00, 0x6b, 0x14, 0x67, 0x5f, 0x82, 0x92, 0x42, 0xee, 0xa7, 0xc5, 0x19, - 0xd3, 0x99, 0x8b, 0xcf, 0xc2, 0x64, 0xa2, 0xad, 0x63, 0x29, 0x81, 0x7e, 0xd1, 0x82, 0x49, 0xde, - 0xe5, 0x65, 0x6f, 0x4f, 0x9c, 0xa9, 0xef, 0xc3, 0xd9, 0x56, 0xca, 0xd9, 0x26, 0x66, 0x74, 0xf0, - 0xb3, 0x50, 0x29, 0x7d, 0xd2, 0xa0, 0x38, 0xb5, 0x0d, 0x74, 0x95, 0xae, 0x5b, 0x7a, 0x76, 0x39, - 0x2d, 0xe1, 0xed, 0x34, 0xc6, 0xd7, 0x2c, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05, 0xd3, 0xbc, - 0xe7, 0x37, 0xc9, 0xbe, 0xda, 0xe1, 0x1f, 0x66, 0xdf, 0x45, 0xfa, 0x8f, 0x5c, 0x46, 0xfa, 0x0f, - 0xfd, 0xd3, 0xf2, 0x3d, 0x3f, 0xed, 0x67, 0x2c, 0x10, 0x2b, 0xf0, 0x14, 0x44, 0xf9, 0x6f, 0x37, - 0x45, 0xf9, 0xd9, 0xec, 0x45, 0x9d, 0x21, 0xc3, 0xff, 0xa9, 0x05, 0x53, 0x1c, 0x21, 0x7e, 0x73, - 0xfe, 0x50, 0xe7, 0x61, 0x90, 0x3c, 0x7e, 0x2a, 0xb9, 0x77, 0xfa, 0x47, 0x19, 0x93, 0x55, 0xe8, - 0x39, 0x59, 0x4d, 0xb9, 0x81, 0x8e, 0x91, 0xc3, 0xf2, 0xd8, 0x61, 0xb4, 0xed, 0x3f, 0xb0, 0x00, - 0xf1, 0x66, 0x0c, 0xf6, 0x87, 0x32, 0x15, 0xac, 0x54, 0xbb, 0x2e, 0xe2, 0xa3, 0x46, 0x41, 0xb0, - 0x86, 0x75, 0x22, 0xc3, 0x93, 0x30, 0x1c, 0xc8, 0xf7, 0x37, 0x1c, 0x38, 0xc6, 0x88, 0xfe, 0xef, - 0x02, 0x24, 0xdd, 0x0f, 0xd0, 0x5d, 0x18, 0x6b, 0x38, 0x6d, 0x67, 0xc3, 0x6d, 0xb9, 0x91, 0x4b, - 0xc2, 0x5e, 0x16, 0x47, 0x4b, 0x1a, 0x9e, 0x78, 0xea, 0xd5, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x01, - 0xb4, 0x03, 0x77, 0xcf, 0x6d, 0x91, 0x2d, 0xa6, 0x71, 0x60, 0xfe, 0x95, 0xdc, 0x8c, 0x46, 0x96, - 0x62, 0x0d, 0x23, 0xc5, 0x55, 0x2e, 0xff, 0xf0, 0x5c, 0xe5, 0x0a, 0xc7, 0x74, 0x95, 0x1b, 0x1a, - 0xc8, 0x55, 0x0e, 0xc3, 0x23, 0x92, 0x45, 0xa2, 0xff, 0x57, 0xdc, 0x16, 0x11, 0x7c, 0x31, 0xf7, - 0xba, 0x9c, 0x3d, 0x3c, 0x28, 0x3f, 0x82, 0x53, 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x39, 0x98, 0x71, - 0x5a, 0x2d, 0xff, 0x9e, 0x1a, 0xb5, 0xe5, 0xb0, 0xe1, 0xb4, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0xbd, - 0x70, 0x78, 0x50, 0x9e, 0x59, 0xc8, 0xc0, 0xc1, 0x99, 0xb5, 0x13, 0x9e, 0x76, 0xc5, 0xbe, 0x9e, - 0x76, 0xaf, 0x42, 0xa9, 0x1d, 0xf8, 0x8d, 0x55, 0xcd, 0xfb, 0xe7, 0x12, 0xcb, 0x90, 0x2f, 0x0b, - 0x8f, 0x0e, 0xca, 0xe3, 0xea, 0x0f, 0xbb, 0xe1, 0xe3, 0x0a, 0xf6, 0x0e, 0x9c, 0xa9, 0x93, 0xc0, - 0x65, 0xb9, 0x37, 0x9b, 0xf1, 0x86, 0x5e, 0x87, 0x52, 0x90, 0x38, 0xc2, 0x06, 0x0a, 0xe4, 0xa4, - 0xc5, 0x17, 0x96, 0x47, 0x56, 0x4c, 0xc8, 0xfe, 0x13, 0x0b, 0x46, 0x84, 0x25, 0xfa, 0x29, 0x70, - 0x4e, 0x0b, 0x86, 0x02, 0xbb, 0x9c, 0x7e, 0xcc, 0xb3, 0xce, 0x64, 0xaa, 0xae, 0xab, 0x09, 0xd5, - 0xf5, 0xe3, 0xbd, 0x88, 0xf4, 0x56, 0x5a, 0xff, 0xad, 0x3c, 0x4c, 0x98, 0xce, 0x23, 0xa7, 0x30, - 0x04, 0x6b, 0x30, 0x12, 0x0a, 0x4f, 0xa5, 0x5c, 0xb6, 0x85, 0x75, 0x72, 0x12, 0x63, 0xf3, 0x29, - 0xe1, 0x9b, 0x24, 0x89, 0xa4, 0xba, 0x40, 0xe5, 0x1f, 0xa2, 0x0b, 0x54, 0x3f, 0xff, 0x9d, 0xc2, - 0x49, 0xf8, 0xef, 0xd8, 0x5f, 0x63, 0x57, 0x8d, 0x5e, 0x7e, 0x0a, 0x5c, 0xc8, 0x75, 0xf3, 0x52, - 0xb2, 
0x7b, 0xac, 0x2c, 0xd1, 0xa9, 0x0c, 0x6e, 0xe4, 0xe7, 0x2d, 0xb8, 0x98, 0xf2, 0x55, 0x1a, - 0x6b, 0xf2, 0x0c, 0x14, 0x9d, 0x4e, 0xd3, 0x55, 0x7b, 0x59, 0x7b, 0xc6, 0x5a, 0x10, 0xe5, 0x58, - 0x61, 0xa0, 0x25, 0x98, 0x26, 0xf7, 0xdb, 0x2e, 0x7f, 0x47, 0xd4, 0x6d, 0x1c, 0xf3, 0x3c, 0xb8, - 0xed, 0x72, 0x12, 0x88, 0xbb, 0xf1, 0x95, 0xfb, 0x77, 0x3e, 0xd3, 0xfd, 0xfb, 0x1f, 0x5a, 0x30, - 0xaa, 0xbc, 0x52, 0x1e, 0xfa, 0x68, 0x7f, 0x87, 0x39, 0xda, 0x8f, 0xf5, 0x18, 0xed, 0x8c, 0x61, - 0xfe, 0x3b, 0x39, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x03, 0xb0, 0x3c, 0x2f, 0x43, 0xb1, 0x1d, 0xf8, - 0x91, 0xdf, 0xf0, 0x5b, 0x82, 0xe3, 0xb9, 0x10, 0x47, 0x27, 0xe0, 0xe5, 0x47, 0xda, 0x6f, 0xac, - 0xb0, 0xd9, 0xe8, 0xf9, 0x41, 0x24, 0xb8, 0x8c, 0x78, 0xf4, 0xfc, 0x20, 0xc2, 0x0c, 0x82, 0x9a, - 0x00, 0x91, 0x13, 0x6c, 0x91, 0x88, 0x96, 0x89, 0x40, 0x27, 0xd9, 0x87, 0x47, 0x27, 0x72, 0x5b, - 0x73, 0xae, 0x17, 0x85, 0x51, 0x30, 0x57, 0xf5, 0xa2, 0xdb, 0x01, 0x17, 0xa0, 0xb4, 0x70, 0x03, - 0x8a, 0x16, 0xd6, 0xe8, 0x4a, 0x9f, 0x50, 0xd6, 0xc6, 0x90, 0xf9, 0x20, 0xbe, 0x26, 0xca, 0xb1, - 0xc2, 0xb0, 0x5f, 0x62, 0x57, 0x09, 0x1b, 0xa0, 0xe3, 0x45, 0x02, 0xf8, 0x7a, 0x51, 0x0d, 0x2d, - 0x7b, 0x0d, 0xab, 0xe8, 0xf1, 0x06, 0x7a, 0x9f, 0xdc, 0xb4, 0x61, 0xdd, 0xdf, 0x26, 0x0e, 0x4a, - 0x80, 0xbe, 0xb3, 0xcb, 0x4e, 0xe2, 0xd9, 0x3e, 0x57, 0xc0, 0x31, 0x2c, 0x23, 0x58, 0xc0, 0x6d, - 0x16, 0x8e, 0xb8, 0x5a, 0x13, 0x8b, 0x5c, 0x0b, 0xb8, 0x2d, 0x00, 0x38, 0xc6, 0x41, 0xf3, 0x42, - 0xfc, 0x2e, 0x18, 0x69, 0xf7, 0xa4, 0xf8, 0x2d, 0x3f, 0x5f, 0x93, 0xbf, 0x9f, 0x83, 0x51, 0x95, - 0x7e, 0xaf, 0xc6, 0xb3, 0x98, 0x89, 0xb0, 0x2f, 0xcb, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x3a, 0x4c, - 0x86, 0x5c, 0xf7, 0xa2, 0xa2, 0xfb, 0x71, 0x1d, 0xd6, 0x27, 0xa5, 0x7d, 0x45, 0xdd, 0x04, 0x1f, - 0xb1, 0x22, 0x7e, 0x74, 0x48, 0xc7, 0xce, 0x24, 0x09, 0xf4, 0x1a, 0x4c, 0xb4, 0xf4, 0x44, 0xf7, - 0x35, 0xa1, 0xe2, 0x52, 0x66, 0xca, 0x46, 0x1a, 0xfc, 0x1a, 0x4e, 0x60, 0x53, 0x4e, 0x49, 0x2f, - 0x11, 0x11, 0x29, 0x1d, 0x6f, 0x8b, 0x84, 0x22, 0x79, 0x18, 0xe3, 0x94, 0x6e, 0x65, 0xe0, 0xe0, - 0xcc, 0xda, 0xe8, 0x65, 0x18, 0x93, 0x9f, 0xaf, 0xb9, 0x2d, 0xc7, 0xc6, 0xf0, 0x1a, 0x0c, 0x1b, - 0x98, 0xe8, 0x1e, 0x9c, 0x93, 0xff, 0xd7, 0x03, 0x67, 0x73, 0xd3, 0x6d, 0x08, 0xaf, 0x71, 0xee, - 0x11, 0xb4, 0x20, 0x5d, 0x8c, 0x96, 0xd3, 0x90, 0x8e, 0x0e, 0xca, 0x97, 0xc5, 0xa8, 0xa5, 0xc2, - 0xd9, 0x24, 0xa6, 0xd3, 0x47, 0xab, 0x70, 0x66, 0x9b, 0x38, 0xad, 0x68, 0x7b, 0x69, 0x9b, 0x34, - 0x76, 0xe4, 0x26, 0x62, 0xce, 0xd0, 0x9a, 0x09, 0xf9, 0x8d, 0x6e, 0x14, 0x9c, 0x56, 0x0f, 0xbd, - 0x0d, 0x33, 0xed, 0xce, 0x46, 0xcb, 0x0d, 0xb7, 0xd7, 0xfc, 0x88, 0x99, 0x74, 0xa8, 0xec, 0x75, - 0xc2, 0x6b, 0x5a, 0x39, 0x82, 0xd7, 0x32, 0xf0, 0x70, 0x26, 0x05, 0xf4, 0x3e, 0x9c, 0x4b, 0x2c, - 0x06, 0xe1, 0xc3, 0x39, 0x91, 0x1d, 0xdf, 0xb7, 0x9e, 0x56, 0x41, 0xf8, 0x64, 0xa6, 0x81, 0x70, - 0x7a, 0x13, 0x1f, 0xcc, 0xd0, 0xe7, 0x3d, 0x5a, 0x59, 0x63, 0xca, 0xd0, 0x3b, 0x30, 0xa6, 0xaf, - 0x22, 0x71, 0xc1, 0x5c, 0x49, 0xe7, 0x59, 0xb4, 0xd5, 0xc6, 0x59, 0x3a, 0xb5, 0xa2, 0x74, 0x18, - 0x36, 0x28, 0xda, 0x04, 0xd2, 0xbf, 0x0f, 0xdd, 0x82, 0x62, 0xa3, 0xe5, 0x12, 0x2f, 0xaa, 0xd6, - 0x7a, 0x05, 0x19, 0x59, 0x12, 0x38, 0x62, 0xc0, 0x44, 0x40, 0x54, 0x5e, 0x86, 0x15, 0x05, 0xfb, - 0xd7, 0x72, 0x50, 0xee, 0x13, 0x5d, 0x37, 0xa1, 0x8f, 0xb6, 0x06, 0xd2, 0x47, 0x2f, 0xc8, 0x5c, - 0x7c, 0x6b, 0x09, 0x21, 0x3d, 0x91, 0x67, 0x2f, 0x16, 0xd5, 0x93, 0xf8, 0x03, 0xdb, 0x07, 0xeb, - 0x2a, 0xed, 0x42, 0x5f, 0x0b, 0x77, 0xe3, 0x29, 0x6b, 0x68, 0x70, 0x41, 0x24, 0xf3, 0x59, 0xc2, - 0xfe, 0x5a, 0x0e, 0xce, 0xa9, 
0x21, 0xfc, 0xe6, 0x1d, 0xb8, 0x3b, 0xdd, 0x03, 0x77, 0x02, 0x8f, - 0x3a, 0xf6, 0x6d, 0x18, 0xe6, 0x41, 0x5a, 0x06, 0x60, 0x80, 0x9e, 0x30, 0x23, 0x7a, 0xa9, 0x6b, - 0xda, 0x88, 0xea, 0xf5, 0x97, 0x2d, 0x98, 0x5c, 0x5f, 0xaa, 0xd5, 0xfd, 0xc6, 0x0e, 0x89, 0x16, - 0x38, 0xc3, 0x8a, 0x05, 0xff, 0x63, 0x3d, 0x20, 0x5f, 0x93, 0xc6, 0x31, 0x5d, 0x86, 0xc2, 0xb6, - 0x1f, 0x46, 0xc9, 0x17, 0xdf, 0x1b, 0x7e, 0x18, 0x61, 0x06, 0xb1, 0x7f, 0xc7, 0x82, 0x21, 0x96, - 0x41, 0xb6, 0x5f, 0x5a, 0xe3, 0x41, 0xbe, 0x0b, 0xbd, 0x08, 0xc3, 0x64, 0x73, 0x93, 0x34, 0x22, - 0x31, 0xab, 0xd2, 0x6d, 0x75, 0x78, 0x99, 0x95, 0xd2, 0x4b, 0x9f, 0x35, 0xc6, 0xff, 0x62, 0x81, - 0x8c, 0xde, 0x84, 0x52, 0xe4, 0xee, 0x92, 0x85, 0x66, 0x53, 0xbc, 0x99, 0x3d, 0x80, 0x97, 0xf0, - 0xba, 0x24, 0x80, 0x63, 0x5a, 0xf6, 0x57, 0x72, 0x00, 0x71, 0xa8, 0x81, 0x7e, 0x9f, 0xb8, 0xd8, - 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, - 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, 0x50, 0x09, 0x66, 0xdc, 0x18, 0x26, 0xa4, - 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, 0x46, 0xf0, 0x94, 0x77, 0x0d, 0x33, 0xc9, - 0x3c, 0x46, 0x86, 0xeb, 0xf8, 0xb9, 0x28, 0x97, 0xf9, 0x5c, 0xf4, 0xe3, 0x16, 0x9c, 0x4d, 0xb6, - 0xc3, 0x7c, 0xe4, 0xbe, 0x6c, 0xc1, 0x39, 0xf6, 0x68, 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x21, - 0x3d, 0x84, 0x44, 0xef, 0x1e, 0xc7, 0xfe, 0xd1, 0xab, 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x97, - 0x2d, 0x38, 0x9f, 0x99, 0xb8, 0x08, 0x5d, 0x85, 0xa2, 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, - 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, 0xa1, 0x62, 0x2e, 0x33, 0xa1, 0x62, 0xdf, - 0xfc, 0x88, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0xa2, 0x06, 0x38, 0x64, 0xde, 0x92, 0xf9, 0x68, 0x8d, - 0xe0, 0xe9, 0x97, 0xb3, 0xfd, 0xc4, 0x44, 0xc8, 0x74, 0x75, 0xa9, 0x1b, 0x81, 0xd2, 0x0d, 0x5a, - 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, - 0x95, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, 0xfd, 0x6f, 0x73, 0x30, 0x2a, 0x83, 0x75, - 0x77, 0xbc, 0x41, 0x24, 0xcb, 0x63, 0x65, 0xef, 0x61, 0x69, 0x5c, 0x29, 0xe1, 0x5a, 0x2c, 0x90, - 0xc7, 0x69, 0x5c, 0x25, 0x00, 0xc7, 0x38, 0xe8, 0x29, 0x18, 0x09, 0x3b, 0x1b, 0x0c, 0x3d, 0xe1, - 0xc4, 0x53, 0xe7, 0xc5, 0x58, 0xc2, 0xd1, 0xe7, 0x60, 0x8a, 0xd7, 0x0b, 0xfc, 0xb6, 0xb3, 0xc5, - 0xd5, 0x9f, 0x43, 0xca, 0xfb, 0x76, 0x6a, 0x35, 0x01, 0x3b, 0x3a, 0x28, 0x9f, 0x4d, 0x96, 0x31, - 0xc5, 0x79, 0x17, 0x15, 0xf6, 0x18, 0xcf, 0x1b, 0xa1, 0xcb, 0xb4, 0xeb, 0x0d, 0x3f, 0x06, 0x61, - 0x1d, 0xcf, 0x7e, 0x07, 0x50, 0x77, 0xd8, 0x72, 0xf4, 0x3a, 0xb7, 0xc0, 0x72, 0x03, 0xd2, 0xec, - 0xa5, 0x48, 0xd7, 0x7d, 0x4c, 0xa5, 0xfd, 0x3d, 0xaf, 0x85, 0x55, 0x7d, 0xfb, 0xaf, 0xe6, 0x61, - 0x2a, 0xe9, 0x71, 0x88, 0x6e, 0xc0, 0x30, 0xbf, 0x23, 0x05, 0xf9, 0x1e, 0xef, 0xb4, 0x9a, 0x9f, - 0x22, 0x3b, 0x2d, 0xc4, 0x35, 0x2b, 0xea, 0xa3, 0xb7, 0x61, 0xb4, 0xe9, 0xdf, 0xf3, 0xee, 0x39, - 0x41, 0x73, 0xa1, 0x56, 0x15, 0xcb, 0x39, 0x95, 0xd5, 0xae, 0xc4, 0x68, 0xba, 0xef, 0x23, 0x7b, - 0x93, 0x88, 0x41, 0x58, 0x27, 0x87, 0xd6, 0x59, 0x28, 0xc6, 0x4d, 0x77, 0x6b, 0xd5, 0x69, 0xf7, - 0x32, 0xc7, 0x5d, 0x92, 0x48, 0x1a, 0xe5, 0x71, 0x11, 0xaf, 0x91, 0x03, 0x70, 0x4c, 0x08, 0x7d, - 0x37, 0x9c, 0x09, 0x33, 0xb4, 0x73, 0x59, 0x59, 0x2c, 0x7a, 0x29, 0xac, 0x16, 0x1f, 0xa5, 0x42, - 0x50, 0x9a, 0x1e, 0x2f, 0xad, 0x19, 0xfb, 0xd7, 0xcf, 0x80, 0xb1, 0x89, 0x8d, 0xa4, 0x46, 0xd6, - 0x09, 0x25, 0x35, 0xc2, 0x50, 0x24, 0xbb, 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x92, 0xee, 0x2d, - 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, 0x9d, 0xf4, 
0xcc, 0x53, 0xf9, 0x0f, 0x31, 0xf3, 0x54, - 0xe1, 0x14, 0x33, 0x4f, 0xad, 0xc1, 0xc8, 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 0x77, 0x9a, 0xba, - 0x0e, 0xaf, 0x73, 0x94, 0xee, 0x1c, 0x27, 0x02, 0x80, 0x25, 0x11, 0xf4, 0xba, 0xda, 0x81, 0xc3, - 0xd9, 0xc2, 0x5d, 0xf7, 0x83, 0x62, 0xea, 0x1e, 0x14, 0xf9, 0xa5, 0x46, 0x1e, 0x34, 0xbf, 0xd4, - 0x8a, 0xcc, 0x0a, 0x55, 0xcc, 0xb6, 0x9d, 0x67, 0x49, 0x9f, 0xfa, 0xe4, 0x82, 0xba, 0xab, 0x67, - 0xd2, 0x2a, 0x65, 0x9f, 0x04, 0x2a, 0x49, 0xd6, 0x80, 0xf9, 0xb3, 0xbe, 0xdf, 0x82, 0x73, 0xed, - 0xb4, 0xa4, 0x72, 0x22, 0x97, 0xd3, 0x8b, 0x03, 0x67, 0xcd, 0x33, 0x1a, 0x64, 0x52, 0x7e, 0x2a, - 0x1a, 0x4e, 0x6f, 0x8e, 0x0e, 0x74, 0xb0, 0xd1, 0x14, 0x09, 0xa0, 0x9e, 0xc8, 0x48, 0xc4, 0xd5, - 0x23, 0xfd, 0xd6, 0x7a, 0x4a, 0xd2, 0xa7, 0x8f, 0x67, 0x25, 0x7d, 0x1a, 0x38, 0xd5, 0xd3, 0xeb, - 0x2a, 0x05, 0xd7, 0x78, 0xf6, 0x52, 0xe2, 0x09, 0xb6, 0xfa, 0x26, 0xde, 0x7a, 0x5d, 0x25, 0xde, - 0xea, 0x11, 0x92, 0x8e, 0xa7, 0xd5, 0xea, 0x9b, 0x6e, 0x4b, 0x4b, 0x99, 0x35, 0x79, 0x32, 0x29, - 0xb3, 0x8c, 0xab, 0x86, 0x67, 0x6d, 0x7a, 0xba, 0xcf, 0x55, 0x63, 0xd0, 0xed, 0x7d, 0xd9, 0xf0, - 0xf4, 0x60, 0xd3, 0x0f, 0x94, 0x1e, 0xec, 0xae, 0x9e, 0x6e, 0x0b, 0xf5, 0xc9, 0x27, 0x45, 0x91, - 0x06, 0x4c, 0xb2, 0x75, 0x57, 0xbf, 0x00, 0xcf, 0x64, 0xd3, 0x55, 0xf7, 0x5c, 0x37, 0xdd, 0xd4, - 0x2b, 0xb0, 0x2b, 0x79, 0xd7, 0xd9, 0xd3, 0x49, 0xde, 0x75, 0xee, 0xc4, 0x93, 0x77, 0x3d, 0x72, - 0x0a, 0xc9, 0xbb, 0x1e, 0xfd, 0x50, 0x93, 0x77, 0xcd, 0x3c, 0x84, 0xe4, 0x5d, 0x6b, 0x71, 0xf2, - 0xae, 0xf3, 0xd9, 0x53, 0x92, 0x62, 0xd0, 0x9b, 0x91, 0xb2, 0xeb, 0x2e, 0x7b, 0xd5, 0xe7, 0x21, - 0x31, 0x44, 0xcc, 0xbc, 0xf4, 0x44, 0xc5, 0x69, 0x71, 0x33, 0xf8, 0x94, 0x28, 0x10, 0x8e, 0x49, - 0x51, 0xba, 0x71, 0x0a, 0xaf, 0xc7, 0x7a, 0xe8, 0x71, 0xd3, 0x34, 0x64, 0x3d, 0x12, 0x77, 0xbd, - 0xc6, 0x13, 0x77, 0x5d, 0xc8, 0x3e, 0xc9, 0x93, 0xd7, 0x9d, 0x99, 0xae, 0xeb, 0x07, 0x72, 0x70, - 0xa9, 0xf7, 0xbe, 0x88, 0xd5, 0x73, 0xb5, 0xf8, 0x39, 0x29, 0xa1, 0x9e, 0xe3, 0xb2, 0x55, 0x8c, - 0x35, 0x70, 0xdc, 0xa1, 0xeb, 0x30, 0xad, 0x2c, 0x81, 0x5b, 0x6e, 0x63, 0x5f, 0x4b, 0x80, 0xac, - 0x3c, 0x1e, 0xeb, 0x49, 0x04, 0xdc, 0x5d, 0x07, 0x2d, 0xc0, 0xa4, 0x51, 0x58, 0xad, 0x08, 0x19, - 0x4a, 0xe9, 0x03, 0xeb, 0x26, 0x18, 0x27, 0xf1, 0xed, 0x9f, 0xb6, 0xe0, 0xd1, 0x8c, 0xbc, 0x18, - 0x03, 0x87, 0xd5, 0xd9, 0x84, 0xc9, 0xb6, 0x59, 0xb5, 0x4f, 0xf4, 0x2d, 0x23, 0xfb, 0x86, 0xea, - 0x6b, 0x02, 0x80, 0x93, 0x44, 0x17, 0xaf, 0xfe, 0xe6, 0xef, 0x5d, 0xfa, 0xd8, 0x6f, 0xfd, 0xde, - 0xa5, 0x8f, 0xfd, 0xf6, 0xef, 0x5d, 0xfa, 0xd8, 0x5f, 0x38, 0xbc, 0x64, 0xfd, 0xe6, 0xe1, 0x25, - 0xeb, 0xb7, 0x0e, 0x2f, 0x59, 0xbf, 0x7d, 0x78, 0xc9, 0xfa, 0xdd, 0xc3, 0x4b, 0xd6, 0x57, 0x7e, - 0xff, 0xd2, 0xc7, 0xde, 0xca, 0xed, 0x3d, 0xf7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x9b, - 0xc3, 0x9b, 0xb7, 0xea, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index b9d569f5..b99d1044 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -161,7 +161,7 @@ message AzureFileVolumeSource { // Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -218,6 +218,15 @@ message CSIPersistentVolumeSource { // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodePublishSecretRef = 8; + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference controllerExpandSecretRef = 9; } // Represents a source location of a volume to mount, managed by an external CSI driver @@ -266,7 +275,7 @@ message Capabilities { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSPersistentVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -274,23 +283,23 @@ message CephFSPersistentVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional SecretReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -299,7 +308,7 @@ message CephFSPersistentVolumeSource { // Cephfs volumes do not support ownership management or SELinux relabeling. 
message CephFSVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -307,23 +316,23 @@ message CephFSVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -333,20 +342,20 @@ message CephFSVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderPersistentVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -361,20 +370,20 @@ message CinderPersistentVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -417,7 +426,7 @@ message ComponentCondition { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. message ComponentStatus { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -431,7 +440,7 @@ message ComponentStatus { // Status of all the conditions for the component as a list of ComponentStatus objects. message ComponentStatusList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -442,7 +451,7 @@ message ComponentStatusList { // ConfigMap holds configuration data for pods to consume. message ConfigMap { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -487,14 +496,14 @@ message ConfigMapKeySelector { // The key to select. optional string key = 2; - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional optional bool optional = 3; } // ConfigMapList is a resource containing a list of ConfigMap objects. message ConfigMapList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -547,7 +556,7 @@ message ConfigMapProjection { // +optional repeated KeyToPath items = 2; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -579,7 +588,7 @@ message ConfigMapVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -692,6 +701,17 @@ message Container { // +optional optional Probe readinessProbe = 11; + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is an alpha feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe startupProbe = 22; + // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -892,6 +912,13 @@ message ContainerStatus { // Container's ID in the format 'docker://'. // +optional optional string containerID = 8; + + // Specifies whether the container has passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered successful. + // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + optional bool started = 9; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -992,7 +1019,8 @@ message EndpointAddress { // EndpointPort is a tuple that describes a single port. message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -1049,7 +1077,7 @@ message EndpointSubset { // ] message Endpoints { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -1067,7 +1095,7 @@ message Endpoints { // EndpointsList is a list of endpoints. message EndpointsList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1132,10 +1160,197 @@ message EnvVarSource { optional SecretKeySelector secretKeyRef = 4; } +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +message EphemeralContainer { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + optional EphemeralContainerCommon ephemeralContainerCommon = 1; + + // If set, the name of the container from PodSpec that this ephemeral container targets. 
+ // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + optional string targetContainerName = 2; +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +message EphemeralContainerCommon { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + optional string name = 1; + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // Ports are not allowed for ephemeral containers. + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EnvVar env = 7; + + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. 
+ // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + repeated VolumeMount volumeMounts = 9; + + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe livenessProbe = 10; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe readinessProbe = 11; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe startupProbe = 22; + + // Lifecycle is not allowed for ephemeral containers. + // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // SecurityContext is not allowed for ephemeral containers. + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. 
+ // +optional + optional bool tty = 18; +} + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +message EphemeralContainers { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 2; +} + // Event is a report of an event somewhere in the cluster. message Event { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. @@ -1200,7 +1415,7 @@ message Event { // EventList is a list of events. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1218,6 +1433,7 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 optional string state = 3; } @@ -1408,22 +1624,22 @@ message GitRepoVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsPersistentVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; // EndpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional string endpointsNamespace = 4; } @@ -1432,16 +1648,16 @@ message GlusterfsPersistentVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; } @@ -1668,7 +1884,7 @@ message Lifecycle { optional Handler postStart = 1; // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness probe failure, + // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the // container crashes or exits. The reason for termination is passed to the // handler. The Pod's termination grace period countdown begins before the @@ -1684,12 +1900,12 @@ message Lifecycle { // LimitRange sets resource usage limits for each kind of resource in a Namespace. message LimitRange { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LimitRangeSpec spec = 2; } @@ -1724,7 +1940,7 @@ message LimitRangeItem { // LimitRangeList is a list of LimitRange items. message LimitRangeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1742,7 +1958,7 @@ message LimitRangeSpec { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1819,25 +2035,43 @@ message NFSVolumeSource { // Use of multiple namespaces is optional. message Namespace { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceSpec spec = 2; // Status describes the current status of a Namespace. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceStatus status = 3; } +// NamespaceCondition contains details about state of namespace. +message NamespaceCondition { + // Type of namespace controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // +optional + optional string reason = 5; + + // +optional + optional string message = 6; +} + // NamespaceList is a list of Namespaces. message NamespaceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1860,25 +2094,31 @@ message NamespaceStatus { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional optional string phase = 1; + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated NamespaceCondition conditions = 2; } // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeSpec spec = 2; // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeStatus status = 3; } @@ -2006,7 +2246,7 @@ message NodeDaemonEndpoints { // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2074,6 +2314,13 @@ message NodeSpec { // +optional optional string podCIDR = 1; + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. 
+ // +optional
+ // +patchStrategy=merge
+ repeated string podCIDRs = 7;
+
 // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
 // +optional
 optional string providerID = 3;
@@ -2126,6 +2373,9 @@ message NodeStatus {
 // List of addresses reachable to the node.
 // Queried from cloud provider, if available.
 // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+ // Note: This field is declared as mergeable, but the merge key is not sufficiently
+ // unique, which can cause data corruption when it is merged. Callers should instead
+ // use a full-replacement patch. See http://pr.k8s.io/79391 for an example.
 // +optional
 // +patchMergeKey=type
 // +patchStrategy=merge
@@ -2208,7 +2458,7 @@ message ObjectFieldSelector {
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 message ObjectReference {
 // Kind of the referent.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
 // +optional
 optional string kind = 1;
@@ -2232,7 +2482,7 @@ message ObjectReference {
 optional string apiVersion = 5;
 // Specific resourceVersion to which this reference is made, if any.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
 // +optional
 optional string resourceVersion = 6;
@@ -2253,7 +2503,7 @@ message ObjectReference {
 // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
 message PersistentVolume {
 // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 // +optional
 optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -2274,7 +2524,7 @@ message PersistentVolume {
 // PersistentVolumeClaim is a user's request for and claim to a persistent volume
 message PersistentVolumeClaim {
 // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 // +optional
 optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -2318,7 +2568,7 @@ message PersistentVolumeClaimCondition {
 // PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
 message PersistentVolumeClaimList {
 // Standard list metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
 // +optional
 optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
@@ -2412,7 +2662,7 @@ message PersistentVolumeClaimVolumeSource {
 // PersistentVolumeList is a list of PersistentVolume items.
 message PersistentVolumeList {
 // Standard list metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2446,7 +2696,7 @@ message PersistentVolumeSource { // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsPersistentVolumeSource glusterfs = 4; @@ -2456,7 +2706,7 @@ message PersistentVolumeSource { optional NFSVolumeSource nfs = 5; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; @@ -2465,8 +2715,8 @@ message PersistentVolumeSource { // +optional optional ISCSIPersistentVolumeSource iscsi = 7; - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; @@ -2519,7 +2769,7 @@ message PersistentVolumeSource { optional LocalVolumeSource local = 20; // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; @@ -2613,12 +2863,12 @@ message PhotonPersistentDiskVolumeSource { // by clients and scheduled onto hosts. message Pod { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; @@ -2626,7 +2876,7 @@ message Pod { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 3; } @@ -2832,15 +3082,23 @@ message PodExecOptions { repeated string command = 6; } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +message PodIP { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + optional string ip = 1; +} + // PodList is a list of Pods. message PodList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md repeated Pod items = 2; } @@ -2927,6 +3185,12 @@ message PodSecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 1; + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 8; + // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -2998,7 +3262,7 @@ message PodSpec { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -3018,6 +3282,16 @@ message PodSpec { // +patchStrategy=merge repeated Container containers = 2; + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 34; + // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -3185,7 +3459,7 @@ message PodSpec { // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - // This is an alpha feature and may change in the future. + // This is a beta feature as of Kubernetes v1.14. // +optional optional string runtimeClassName = 29; @@ -3194,6 +3468,37 @@ message PodSpec { // Optional: Defaults to true. // +optional optional bool enableServiceLinks = 30; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. 
+ // +optional
+ optional string preemptionPolicy = 31;
+
+ // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ // This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ // The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
+ // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
+
+ // TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ // domains. Scheduler will schedule pods in a way which abides by the constraints.
+ // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread
+ // feature.
+ // All topologySpreadConstraints are ANDed.
+ // +optional
+ // +patchMergeKey=topologyKey
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=topologyKey
+ // +listMapKey=whenUnsatisfiable
+ repeated TopologySpreadConstraint topologySpreadConstraints = 33;
 }
 // PodStatus represents information about the status of a pod. Status may trail the actual
@@ -3256,6 +3561,14 @@ message PodStatus {
 // +optional
 optional string podIP = 6;
+ // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
+ // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
+ // is empty if no IPs have been allocated yet.
+ // +optional
+ // +patchStrategy=merge
+ // +patchMergeKey=ip
+ repeated PodIP podIPs = 12;
+
 // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
 // This is before the Kubelet pulled the container image(s) for the pod.
 // +optional
@@ -3278,12 +3591,17 @@ message PodStatus {
 // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
 // +optional
 optional string qosClass = 9;
+
+ // Status for any ephemeral containers that have run in this pod.
+ // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.
+ // +optional
+ repeated ContainerStatus ephemeralContainerStatuses = 13;
 }
 // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
 message PodStatusResult {
 // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 // +optional
 optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -3291,7 +3609,7 @@ message PodStatusResult {
 // This data may not be up to date.
 // Populated by the system.
 // Read-only.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 2; } @@ -3299,12 +3617,12 @@ message PodStatusResult { // PodTemplate describes a template for creating copies of a predefined pod. message PodTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodTemplateSpec template = 2; } @@ -3312,7 +3630,7 @@ message PodTemplate { // PodTemplateList is a list of PodTemplates. message PodTemplateList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3323,12 +3641,12 @@ message PodTemplateList { // PodTemplateSpec describes the data a pod should have when created from a template message PodTemplateSpec { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; } @@ -3408,7 +3726,7 @@ message Probe { optional int32 periodSeconds = 4; // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional optional int32 successThreshold = 5; @@ -3468,11 +3786,11 @@ message QuobyteVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDPersistentVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3485,32 +3803,32 @@ message RBDPersistentVolumeSource { // The rados pool name. // Default is rbd. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional SecretReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3519,11 +3837,11 @@ message RBDPersistentVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3536,32 +3854,32 @@ message RBDVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3569,7 +3887,7 @@ message RBDVolumeSource { // RangeAllocation is not a public type. message RangeAllocation { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3584,12 +3902,12 @@ message RangeAllocation { message ReplicationController { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerSpec spec = 2; @@ -3597,7 +3915,7 @@ message ReplicationController { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerStatus status = 3; } @@ -3626,7 +3944,7 @@ message ReplicationControllerCondition { // ReplicationControllerList is a collection of replication controllers. message ReplicationControllerList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3712,17 +4030,17 @@ message ResourceFieldSelector { // ResourceQuota sets aggregate quota restrictions enforced per namespace message ResourceQuota { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaSpec spec = 2; // Status defines the actual enforced quota and its current usage. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaStatus status = 3; } @@ -3730,7 +4048,7 @@ message ResourceQuota { // ResourceQuotaList is a list of ResourceQuota items. message ResourceQuotaList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3926,7 +4244,7 @@ message ScopedResourceSelectorRequirement { // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3972,7 +4290,7 @@ message SecretKeySelector { // The key of the secret to select from. Must be a valid secret key. optional string key = 2; - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional optional bool optional = 3; } @@ -3980,7 +4298,7 @@ message SecretKeySelector { // SecretList is a list of Secret. message SecretList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4054,7 +4372,7 @@ message SecretVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional optional bool optional = 4; } @@ -4081,6 +4399,12 @@ message SecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 3; + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 10; + // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -4138,19 +4462,19 @@ message SerializedReference { // will answer requests sent through the proxy. message Service { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceSpec spec = 2; // Most recently observed status of the service. 
// Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceStatus status = 3; } @@ -4161,7 +4485,7 @@ message Service { // * a set of secrets message ServiceAccount { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -4188,7 +4512,7 @@ message ServiceAccount { // ServiceAccountList is a list of ServiceAccount objects message ServiceAccountList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4226,7 +4550,7 @@ message ServiceAccountTokenProjection { // ServiceList holds a list of services. message ServiceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4237,8 +4561,9 @@ message ServiceList { // ServicePort contains information on service's port. message ServicePort { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. // Optional if only one ServicePort is defined on this service. // +optional optional string name = 1; @@ -4398,6 +4723,16 @@ message ServiceSpec { // sessionAffinityConfig contains the configurations of session affinity. // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + optional string ipFamily = 15; } // ServiceStatus represents the current status of a service. @@ -4576,6 +4911,59 @@ message TopologySelectorTerm { repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; } +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. 
+message TopologySpreadConstraint {
+ // MaxSkew describes the degree to which pods may be unevenly distributed.
+ // It's the maximum permitted difference between the number of matching pods in
+ // any two topology domains of a given topology type.
+ // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ // labelSelector spread as 1/1/0:
+ // +-------+-------+-------+
+ // | zone1 | zone2 | zone3 |
+ // +-------+-------+-------+
+ // | P | P | |
+ // +-------+-------+-------+
+ // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1;
+ // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)
+ // violate MaxSkew(1).
+ // - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ // It's a required field. Default value is 1 and 0 is not allowed.
+ optional int32 maxSkew = 1;
+
+ // TopologyKey is the key of node labels. Nodes that have a label with this key
+ // and identical values are considered to be in the same topology.
+ // We consider each <key, value> as a "bucket", and try to put balanced number
+ // of pods into each bucket.
+ // It's a required field.
+ optional string topologyKey = 2;
+
+ // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ // the spread constraint.
+ // - DoNotSchedule (default) tells the scheduler not to schedule it
+ // - ScheduleAnyway tells the scheduler to still schedule it
+ // It's considered as "Unsatisfiable" if and only if placing incoming pod on any
+ // topology violates "MaxSkew".
+ // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ // labelSelector spread as 3/1/1:
+ // +-------+-------+-------+
+ // | zone1 | zone2 | zone3 |
+ // +-------+-------+-------+
+ // | P P P | P | P |
+ // +-------+-------+-------+
+ // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ // won't make it *more* imbalanced.
+ // It's a required field.
+ optional string whenUnsatisfiable = 3;
+
+ // LabelSelector is used to find matching pods.
+ // Pods that match this label selector are counted to determine the number of pods
+ // in their corresponding topology domain.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4;
+}
+
 // TypedLocalObjectReference contains enough information to let you locate the
 // typed referenced object inside the same namespace.
 message TypedLocalObjectReference {
@@ -4644,7 +5032,7 @@ message VolumeMount {
 // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
 // Defaults to "" (volume's root).
 // SubPathExpr and SubPath are mutually exclusive.
- // This field is alpha in 1.14.
+ // This field is beta in 1.15.
 // +optional
 optional string subPathExpr = 6;
 }
@@ -4724,12 +5112,12 @@ message VolumeSource {
 // ISCSI represents an ISCSI Disk resource that is attached to a
 // kubelet's host machine and then exposed to the pod.
- // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
+ // More info: https://examples.k8s.io/volumes/iscsi/README.md
 // +optional
 optional ISCSIVolumeSource iscsi = 8;
 // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
- // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md
 // +optional
 optional GlusterfsVolumeSource glusterfs = 9;
@@ -4740,7 +5128,7 @@ message VolumeSource {
 optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10;
 // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+ // More info: https://examples.k8s.io/volumes/rbd/README.md
 // +optional
 optional RBDVolumeSource rbd = 11;
@@ -4749,8 +5137,8 @@ message VolumeSource {
 // +optional
 optional FlexVolumeSource flexVolume = 12;
- // Cinder represents a cinder volume attached and mounted on kubelets host machine
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ // Cinder represents a cinder volume attached and mounted on kubelets host machine.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
 // +optional
 optional CinderVolumeSource cinder = 13;
@@ -4843,3 +5231,26 @@ message WeightedPodAffinityTerm {
 optional PodAffinityTerm podAffinityTerm = 2;
 }
+// WindowsSecurityContextOptions contain Windows-specific options and credentials.
+message WindowsSecurityContextOptions {
+ // GMSACredentialSpecName is the name of the GMSA credential spec to use.
+ // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+ // +optional
+ optional string gmsaCredentialSpecName = 1;
+
+ // GMSACredentialSpec is where the GMSA admission webhook
+ // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ // GMSA credential spec named by the GMSACredentialSpecName field.
+ // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+ // +optional
+ optional string gmsaCredentialSpec = 2;
+
+ // The UserName in Windows to run the entrypoint of the container process.
+ // Defaults to the user specified in image metadata if unspecified.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.
+ // +optional
+ optional string runAsUserName = 3;
+}
+
 diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go
 index 1aac0cb4..8da1ddea 100644
 --- a/vendor/k8s.io/api/core/v1/register.go
 +++ b/vendor/k8s.io/api/core/v1/register.go
@@ -88,6 +88,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 &RangeAllocation{},
 &ConfigMap{},
 &ConfigMapList{},
+ &EphemeralContainers{},
 )
 // Add common types
 diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go
 index 7b606a30..db71bd2f 100644
 --- a/vendor/k8s.io/api/core/v1/taint.go
 +++ b/vendor/k8s.io/api/core/v1/taint.go
@@ -24,8 +24,14 @@ func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
 return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
 }
-// taint.ToString() converts taint struct to string in format key=value:effect or key:effect.
+// taint.ToString() converts taint struct to string in format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
func (t *Taint) ToString() string { + if len(t.Effect) == 0 { + if len(t.Value) == 0 { + return fmt.Sprintf("%v", t.Key) + } + return fmt.Sprintf("%v=%v:", t.Key, t.Value) + } if len(t.Value) == 0 { return fmt.Sprintf("%v:%v", t.Key, t.Effect) } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 3af13440..fcd45540 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -87,11 +87,11 @@ type VolumeSource struct { NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // PersistentVolumeClaimVolumeSource represents a reference to a @@ -100,15 +100,15 @@ type VolumeSource struct { // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -192,7 +192,7 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. @@ -200,15 +200,15 @@ type PersistentVolumeSource struct { // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -248,7 +248,7 @@ type PersistentVolumeSource struct { // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` // CSI represents storage that is handled by an external CSI driver (Beta feature). @@ -275,7 +275,7 @@ const ( type PersistentVolume struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -390,7 +390,7 @@ type PersistentVolumeStatus struct { type PersistentVolumeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. @@ -405,7 +405,7 @@ type PersistentVolumeList struct { type PersistentVolumeClaim struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -427,7 +427,7 @@ type PersistentVolumeClaim struct { type PersistentVolumeClaimList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. 
@@ -625,16 +625,16 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -643,22 +643,22 @@ type GlusterfsVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsPersistentVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // EndpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` } @@ -667,10 +667,10 @@ type GlusterfsPersistentVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. 
// Tip: Ensure that the filesystem type is supported by the host operating system. @@ -681,28 +681,28 @@ type RBDVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -711,10 +711,10 @@ type RBDVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDPersistentVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -725,28 +725,28 @@ type RBDPersistentVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -756,18 +756,18 @@ type RBDPersistentVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -781,18 +781,18 @@ type CinderVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderPersistentVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -805,26 +805,26 @@ type CinderPersistentVolumeSource struct { // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -844,26 +844,26 @@ type SecretReference struct { // Cephfs volumes do not support ownership management or SELinux relabeling. 
type CephFSPersistentVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -1094,7 +1094,7 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1520,7 +1520,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1547,7 +1547,7 @@ type ConfigMapProjection struct { // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1692,6 +1692,15 @@ type CSIPersistentVolumeSource struct { // secret object contains more than one secret, all secrets are passed. 
// +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` } // Represents a source location of a volume to mount, managed by an external CSI driver @@ -1775,7 +1784,7 @@ type VolumeMount struct { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is alpha in 1.14. + // This field is beta in 1.15. // +optional SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } @@ -1880,7 +1889,7 @@ type ConfigMapKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -1891,7 +1900,7 @@ type SecretKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -2016,7 +2025,7 @@ type Probe struct { // +optional PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` // Minimum consecutive failures for the probe to be considered failed after having succeeded. @@ -2037,6 +2046,16 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// PreemptionPolicy describes a policy for if/when to preempt a pod. +type PreemptionPolicy string + +const ( + // PreemptLowerPriority means that pod can preempt other pods with lower priority. + PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority" + // PreemptNever means that pod never preempts other pods with lower priority. + PreemptNever PreemptionPolicy = "Never" +) + // TerminationMessagePolicy describes how termination messages are retrieved from a container. 
type TerminationMessagePolicy string @@ -2177,6 +2196,16 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is an alpha feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -2263,7 +2292,7 @@ type Lifecycle struct { // +optional PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness probe failure, + // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the // container crashes or exits. The reason for termination is passed to the // handler. The Pod's termination grace period countdown begins before the @@ -2371,6 +2400,12 @@ type ContainerStatus struct { // Container's ID in the format 'docker://'. // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` + // Specifies whether the container has passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered successful. + // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2401,18 +2436,22 @@ type PodConditionType string // These are valid conditions of pod. const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" + // ContainersReady indicates whether all containers in the pod are ready. + ContainersReady PodConditionType = "ContainersReady" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" // PodReady means the pod is able to service requests and should be added to the // load balancing pools of all matching services. PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" +) + +// These are reasons for a pod's transition to a condition. 
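// A minimal sketch of how the new StartupProbe field added above might be used
// next to a liveness probe, assuming corev1 = "k8s.io/api/core/v1" at this
// vendored version; the image, path, and port are hypothetical, and the field
// is alpha (gated by the StartupProbe feature flag per the comment above).
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

var slowStartingContainer = corev1.Container{
	Name:  "app",
	Image: "example.com/app:1.0", // hypothetical image
	// The startup probe gates the other probes until it succeeds once.
	StartupProbe: &corev1.Probe{
		Handler: corev1.Handler{
			HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
		},
		SuccessThreshold: 1,  // must be 1 for liveness and startup probes
		FailureThreshold: 30, // with PeriodSeconds, allows ~5 minutes to initialize
		PeriodSeconds:    10,
	},
	// Once the startup probe has succeeded, the liveness probe takes over.
	LivenessProbe: &corev1.Probe{
		Handler: corev1.Handler{
			HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
		},
		PeriodSeconds: 10,
	},
}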
+const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" - // ContainersReady indicates whether all containers in the pod are ready. - ContainersReady PodConditionType = "ContainersReady" ) // PodCondition contains details for the current condition of this pod. @@ -2802,7 +2841,7 @@ type PodSpec struct { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -2820,6 +2859,15 @@ type PodSpec struct { // +patchMergeKey=name // +patchStrategy=merge Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -2953,7 +3001,6 @@ type PodSpec struct { // configuration based on DNSPolicy. // +optional DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` - // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" @@ -2965,7 +3012,7 @@ type PodSpec struct { // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - // This is an alpha feature and may change in the future. + // This is a beta feature as of Kubernetes v1.14. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` // EnableServiceLinks indicates whether information about services should be injected into pod's @@ -2973,6 +3020,95 @@ type PodSpec struct { // Optional: Defaults to true. // +optional EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. 
+ // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. + // All topologySpreadConstraints are ANDed. + // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` +} + +type UnsatisfiableConstraintAction string + +const ( + // DoNotSchedule instructs the scheduler not to schedule the pod + // when constraints are not satisfied. + DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" + // ScheduleAnyway instructs the scheduler to schedule the pod + // even if constraints are not satisfied. + ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" +) + +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +type TopologySpreadConstraint struct { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"` + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. 
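// A minimal sketch of a pod spec that opts out of preempting lower-priority
// pods via the PreemptionPolicy field shown above, assuming corev1 =
// "k8s.io/api/core/v1"; the priority class and image names are hypothetical,
// and the field is only honored with the NonPreemptingPriority feature enabled.
package example

import corev1 "k8s.io/api/core/v1"

var neverPreempt = corev1.PreemptNever

var nonPreemptingPodSpec = corev1.PodSpec{
	PriorityClassName: "batch-low", // hypothetical PriorityClass
	PreemptionPolicy:  &neverPreempt,
	Containers: []corev1.Container{
		{Name: "worker", Image: "example.com/worker:1.0"},
	},
}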
+ // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. + TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"` + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"` + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"` } const ( @@ -3000,6 +3136,11 @@ type PodSecurityContext struct { // takes precedence for that container. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -3085,6 +3226,175 @@ type PodDNSConfigOption struct { Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +type PodIP struct { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +type EphemeralContainerCommon struct { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. 
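// A minimal sketch of the constraint described above: keep pods labeled
// app=demo spread across zones with a skew of at most 1. Assumes corev1 =
// "k8s.io/api/core/v1" and metav1 = "k8s.io/apimachinery/pkg/apis/meta/v1";
// the node label key and pod labels are hypothetical examples, and the field
// is only honored with the EvenPodsSpread feature enabled.
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var spreadAcrossZones = corev1.TopologySpreadConstraint{
	MaxSkew:           1,
	TopologyKey:       "topology.kubernetes.io/zone", // any node label key may be used
	WhenUnsatisfiable: corev1.DoNotSchedule,
	LabelSelector: &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "demo"},
	},
}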
+ // More info: https://kubernetes.io/docs/concepts/containers/images + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // Ports are not allowed for ephemeral containers. + Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. 
+ // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Probes are not allowed for ephemeral containers. + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Lifecycle is not allowed for ephemeral containers. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext is not allowed for ephemeral containers. + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// EphemeralContainerCommon converts to Container. All fields must be kept in sync between +// these two types. +var _ = Container(EphemeralContainerCommon{}) + +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +type EphemeralContainer struct { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"` + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system, especially if the node that hosts the pod cannot contact the control // plane. @@ -3140,6 +3450,14 @@ type PodStatus struct { // +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. 
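// A minimal sketch of a debug container that could be added to a running pod
// through the pod's ephemeralcontainers subresource described above. Assumes
// corev1 = "k8s.io/api/core/v1"; the names and image are hypothetical, and the
// EphemeralContainers feature gate must be enabled.
package example

import corev1 "k8s.io/api/core/v1"

var debugContainer = corev1.EphemeralContainer{
	EphemeralContainerCommon: corev1.EphemeralContainerCommon{
		Name:    "debugger",
		Image:   "busybox",
		Command: []string{"sh"},
		Stdin:   true,
		TTY:     true,
	},
	// Run in the namespaces of the existing "app" container, if the runtime supports it.
	TargetContainerName: "app",
}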
// +optional @@ -3161,6 +3479,10 @@ type PodStatus struct { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -3169,19 +3491,21 @@ type PodStatus struct { type PodStatusResult struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // +genclient +// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers +// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Pod is a collection of containers that can run on a host. This resource is created @@ -3189,12 +3513,12 @@ type PodStatusResult struct { type Pod struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3202,7 +3526,7 @@ type Pod struct { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3213,24 +3537,24 @@ type Pod struct { type PodList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -3242,12 +3566,12 @@ type PodTemplateSpec struct { type PodTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -3258,7 +3582,7 @@ type PodTemplate struct { type PodTemplateList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3370,12 +3694,12 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3383,7 +3707,7 @@ type ReplicationController struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3394,7 +3718,7 @@ type ReplicationController struct { type ReplicationControllerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3495,6 +3819,17 @@ type LoadBalancerIngress struct { Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` } +// IPFamily represents the IP Family (IPv4 or IPv6). This type is used +// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +type IPFamily string + +const ( + // IPv4Protocol indicates that this IP is IPv4 protocol + IPv4Protocol IPFamily = "IPv4" + // IPv6Protocol indicates that this IP is IPv6 protocol + IPv6Protocol IPFamily = "IPv6" +) + // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. @@ -3610,13 +3945,24 @@ type ServiceSpec struct { // sessionAffinityConfig contains the configurations of session affinity. // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` } // ServicePort contains information on service's port. type ServicePort struct { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. 
// Optional if only one ServicePort is defined on this service. // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -3659,19 +4005,19 @@ type ServicePort struct { type Service struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3688,7 +4034,7 @@ const ( type ServiceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3706,7 +4052,7 @@ type ServiceList struct { type ServiceAccount struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3736,7 +4082,7 @@ type ServiceAccount struct { type ServiceAccountList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3763,7 +4109,7 @@ type ServiceAccountList struct { type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3825,7 +4171,8 @@ type EndpointAddress struct { // EndpointPort is a tuple that describes a single port. type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. 
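// A minimal sketch of requesting an IPv6 clusterIP through the ipFamily field
// shown above, assuming corev1 = "k8s.io/api/core/v1"; the selector and port
// are hypothetical, and the request only succeeds on clusters that actually
// provide an IPv6 service CIDR.
package example

import corev1 "k8s.io/api/core/v1"

var ipv6 = corev1.IPv6Protocol

var ipv6ServiceSpec = corev1.ServiceSpec{
	Selector: map[string]string{"app": "demo"},
	IPFamily: &ipv6,
	Ports: []corev1.ServicePort{
		// Per the comments above, this name must match the 'name' field of the
		// corresponding EndpointPort.
		{Name: "http", Port: 80},
	},
}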
// +optional @@ -3847,7 +4194,7 @@ type EndpointPort struct { type EndpointsList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3860,6 +4207,14 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node. // +optional PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. + // +optional + // +patchStrategy=merge + PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` + // ID of the node assigned by the cloud provider in the format: :// // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` @@ -4041,6 +4396,9 @@ type NodeStatus struct { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -4140,9 +4498,6 @@ type NodeConditionType string const ( // NodeReady means kubelet is healthy and ready to accept pods. NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. NodeMemoryPressure NodeConditionType = "MemoryPressure" // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. @@ -4233,19 +4588,19 @@ type ResourceList map[ResourceName]resource.Quantity type Node struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. 
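// A minimal sketch of the full-replacement approach recommended in the node
// addresses note above: replace the whole list with a JSON patch against the
// status subresource rather than relying on the strategic merge key. Assumes a
// configured client-go v0.16 clientset (whose Patch call takes no context);
// the helper name is hypothetical.
package example

import (
	"encoding/json"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func replaceNodeAddresses(client kubernetes.Interface, nodeName string, addrs []corev1.NodeAddress) error {
	// Build a JSON patch that replaces the entire addresses list in one step.
	patch, err := json.Marshal([]map[string]interface{}{
		{"op": "replace", "path": "/status/addresses", "value": addrs},
	})
	if err != nil {
		return err
	}
	_, err = client.CoreV1().Nodes().Patch(nodeName, types.JSONPatchType, patch, "status")
	return err
}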
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4256,7 +4611,7 @@ type Node struct { type NodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4287,6 +4642,12 @@ type NamespaceStatus struct { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } type NamespacePhase string @@ -4299,6 +4660,32 @@ const ( NamespaceTerminating NamespacePhase = "Terminating" ) +type NamespaceConditionType string + +// These are valid conditions of a namespace. +const ( + // NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery. + NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" + // NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources. + NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" + // NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types. + NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" +) + +// NamespaceCondition contains details about state of namespace. +type NamespaceCondition struct { + // Type of namespace controller condition. + Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + // +genclient // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection @@ -4309,17 +4696,17 @@ const ( type Namespace struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4330,7 +4717,7 @@ type Namespace struct { type NamespaceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4346,7 +4733,7 @@ type NamespaceList struct { type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4354,6 +4741,22 @@ type Binding struct { Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +type EphemeralContainers struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"` +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. // +k8s:openapi-gen=false type Preconditions struct { @@ -4537,7 +4940,7 @@ type ServiceProxyOptions struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. @@ -4556,7 +4959,7 @@ type ObjectReference struct { // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -4631,7 +5034,7 @@ const ( type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. @@ -4701,6 +5104,7 @@ type EventSeries struct { // Time of the last occurrence observed LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` } @@ -4718,7 +5122,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4778,12 +5182,12 @@ type LimitRangeSpec struct { type LimitRange struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -4794,7 +5198,7 @@ type LimitRange struct { type LimitRangeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4934,17 +5338,17 @@ type ResourceQuotaStatus struct { type ResourceQuota struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4955,7 +5359,7 @@ type ResourceQuota struct { type ResourceQuotaList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4972,7 +5376,7 @@ type ResourceQuotaList struct { type Secret struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5089,7 +5493,7 @@ const ( type SecretList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5105,7 +5509,7 @@ type SecretList struct { type ConfigMap struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5134,7 +5538,7 @@ type ConfigMap struct { type ConfigMapList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5176,7 +5580,7 @@ type ComponentCondition struct { type ComponentStatus struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5193,7 +5597,7 @@ type ComponentStatus struct { type ComponentStatusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5267,6 +5671,11 @@ type SecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -5337,13 +5746,36 @@ type SELinuxOptions struct { Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` } +// WindowsSecurityContextOptions contain Windows-specific options and credentials. +type WindowsSecurityContextOptions struct { + // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"` + + // GMSACredentialSpec is where the GMSA admission webhook + // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + // GMSA credential spec named by the GMSACredentialSpecName field. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // +optional + RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RangeAllocation is not a public type. type RangeAllocation struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
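A minimal sketch of setting the WindowsOptions field introduced above on a container's SecurityContext; the credential spec name and user name are assumptions, and these fields are alpha in this vendored API version, honored only when the WindowsGMSA and WindowsRunAsUserName feature gates are enabled.

    import corev1 "k8s.io/api/core/v1"

    // windowsSecurityContext asks the GMSA admission webhook to inline the named
    // credential spec and sets the Windows user for the container entrypoint.
    func windowsSecurityContext() *corev1.SecurityContext {
        credSpecName := "webapp-gmsa"              // assumed GMSA credential spec name
        runAsUser := "ContainerAdministrator"      // assumed Windows user name
        return &corev1.SecurityContext{
            WindowsOptions: &corev1.WindowsSecurityContextOptions{
                GMSACredentialSpecName: &credSpecName,
                RunAsUserName:          &runAsUser,
            },
        }
    }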
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 2c5b04f2..35b8389a 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -108,7 +108,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { var map_Binding = map[string]string{ "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -126,6 +126,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -157,12 +158,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSPersistentVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -171,12 +172,12 @@ func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -185,9 +186,9 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderPersistentVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -197,9 +198,9 @@ func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -230,7 +231,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -240,7 +241,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -250,7 +251,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. 
Using this field will require 1.10+ apiserver and kubelet.", } @@ -271,7 +272,7 @@ func (ConfigMapEnvSource) SwaggerDoc() map[string]string { var map_ConfigMapKeySelector = map[string]string{ "": "Selects a key from a ConfigMap.", "key": "The key to select.", - "optional": "Specify whether the ConfigMap or it's key must be defined", + "optional": "Specify whether the ConfigMap or its key must be defined", } func (ConfigMapKeySelector) SwaggerDoc() map[string]string { @@ -280,7 +281,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -304,7 +305,7 @@ func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapProjection) SwaggerDoc() map[string]string { @@ -315,7 +316,7 @@ var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -337,6 +338,7 @@ var map_Container = map[string]string{ "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -429,6 +431,7 @@ var map_ContainerStatus = map[string]string{ "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", "imageID": "ImageID of the container's image.", "containerID": "Container's ID in the format 'docker://'.", + "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -499,7 +502,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort is a tuple that describes a single port.", - "name": "The name of this port (corresponds to ServicePort.Name). 
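As a minimal sketch of the startupProbe field documented above, a slow-starting container can defer liveness checking until startup has succeeded; the /healthz path, port 8080 and thresholds are illustrative assumptions, and the StartupProbe feature gate must be enabled.

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    // slowStartContainer tolerates up to 30 * 10s of startup time before the
    // kubelet gives up; the liveness probe only takes over once startup succeeds.
    func slowStartContainer() corev1.Container {
        httpCheck := corev1.Handler{
            HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
        }
        return corev1.Container{
            Name:  "app",                 // assumed container name
            Image: "example.com/app:v1",  // assumed image
            StartupProbe: &corev1.Probe{
                Handler:          httpCheck,
                FailureThreshold: 30,
                PeriodSeconds:    10,
            },
            LivenessProbe: &corev1.Probe{
                Handler:       httpCheck,
                PeriodSeconds: 10,
            },
        }
    }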
Must be a DNS_LABEL. Optional only if one port is defined.", + "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", } @@ -521,7 +524,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -531,7 +534,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -573,9 +576,57 @@ func (EnvVarSource) SwaggerDoc() map[string]string { return map_EnvVarSource } +var map_EphemeralContainer = map[string]string{ + "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", + "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", +} + +func (EphemeralContainer) SwaggerDoc() map[string]string { + return map_EphemeralContainer +} + +var map_EphemeralContainerCommon = map[string]string{ + "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. 
This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", + "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "Ports are not allowed for ephemeral containers.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", + "livenessProbe": "Probes are not allowed for ephemeral containers.", + "readinessProbe": "Probes are not allowed for ephemeral containers.", + "startupProbe": "Probes are not allowed for ephemeral containers.", + "lifecycle": "Lifecycle is not allowed for ephemeral containers.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "SecurityContext is not allowed for ephemeral containers.", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (EphemeralContainerCommon) SwaggerDoc() map[string]string { + return map_EphemeralContainerCommon +} + +var map_EphemeralContainers = map[string]string{ + "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", + "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", +} + +func (EphemeralContainers) SwaggerDoc() map[string]string { + return map_EphemeralContainers +} + var map_Event = map[string]string{ "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "message": "A human-readable description of the status of this operation.", @@ -598,7 +649,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of events", } @@ -610,7 +661,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. 
thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time of the last occurrence observed", - "state": "State of this Series: Ongoing or Finished", + "state": "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18", } func (EventSeries) SwaggerDoc() map[string]string { @@ -710,10 +761,10 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsPersistentVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -722,9 +773,9 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -837,7 +888,7 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -846,8 +897,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -870,7 +921,7 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", } @@ -938,18 +989,28 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { return map_Namespace } +var map_NamespaceCondition = map[string]string{ + "": "NamespaceCondition contains details about state of namespace.", + "type": "Type of namespace controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", +} + +func (NamespaceCondition) SwaggerDoc() map[string]string { + return map_NamespaceCondition +} + var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", } @@ -967,8 +1028,9 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { } var map_NamespaceStatus = map[string]string{ - "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "conditions": "Represents the latest available observations of a namespace's current state.", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -977,9 +1039,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -1052,7 +1114,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -1111,6 +1173,7 @@ func (NodeSelectorTerm) SwaggerDoc() map[string]string { var map_NodeSpec = map[string]string{ "": "NodeSpec describes the attributes that a node is created with.", "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "podCIDRs": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", @@ -1128,7 +1191,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1171,12 +1234,12 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -1186,7 +1249,7 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_PersistentVolume = map[string]string{ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", } @@ -1197,7 +1260,7 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1220,7 +1283,7 @@ func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1267,7 +1330,7 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", } @@ -1280,11 +1343,11 @@ var map_PersistentVolumeSource = map[string]string{ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", @@ -1297,7 +1360,7 @@ var map_PersistentVolumeSource = map[string]string{ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", - "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", + "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", } @@ -1344,9 +1407,9 @@ func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { var map_Pod = map[string]string{ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Pod) SwaggerDoc() map[string]string { @@ -1445,10 +1508,19 @@ func (PodExecOptions) SwaggerDoc() map[string]string { return map_PodExecOptions } +var map_PodIP = map[string]string{ + "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. 
Routable at least within the cluster.", + "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", +} + +func (PodIP) SwaggerDoc() map[string]string { + return map_PodIP +} + var map_PodList = map[string]string{ "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", } func (PodList) SwaggerDoc() map[string]string { @@ -1501,6 +1573,7 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -1525,8 +1598,9 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", - "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", @@ -1552,8 +1626,11 @@ var map_PodSpec = map[string]string{ "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", + "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1561,18 +1638,20 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. 
The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. 
The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1581,8 +1660,8 @@ func (PodStatus) SwaggerDoc() map[string]string { var map_PodStatusResult = map[string]string{ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodStatusResult) SwaggerDoc() map[string]string { @@ -1591,8 +1670,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string { var map_PodTemplate = map[string]string{ "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplate) SwaggerDoc() map[string]string { @@ -1601,7 +1680,7 @@ func (PodTemplate) SwaggerDoc() map[string]string { var map_PodTemplateList = map[string]string{ "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of pod templates", } @@ -1611,8 +1690,8 @@ func (PodTemplateList) SwaggerDoc() map[string]string { var map_PodTemplateSpec = map[string]string{ "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplateSpec) SwaggerDoc() map[string]string { @@ -1666,7 +1745,7 @@ var map_Probe = map[string]string{ "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", } @@ -1700,14 +1779,14 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { var map_RBDPersistentVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1716,14 +1795,14 @@ func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1732,7 +1811,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string { var map_RangeAllocation = map[string]string{ "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "range": "Range is string that identifies the range represented by 'data'.", "data": "Data is a bit array containing all allocated addresses in the previous segment.", } @@ -1743,9 +1822,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string { var map_ReplicationController = map[string]string{ "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicationController) SwaggerDoc() map[string]string { @@ -1767,7 +1846,7 @@ func (ReplicationControllerCondition) SwaggerDoc() map[string]string { var map_ReplicationControllerList = map[string]string{ "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -1814,9 +1893,9 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ResourceQuota) SwaggerDoc() map[string]string { @@ -1825,7 +1904,7 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", } @@ -1934,7 +2013,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", "type": "Used to facilitate programmatic handling of secret data.", @@ -1956,7 +2035,7 @@ func (SecretEnvSource) SwaggerDoc() map[string]string { var map_SecretKeySelector = map[string]string{ "": "SecretKeySelector selects a key of a Secret.", "key": "The key of the secret to select from. Must be a valid secret key.", - "optional": "Specify whether the Secret or it's key must be defined", + "optional": "Specify whether the Secret or its key must be defined", } func (SecretKeySelector) SwaggerDoc() map[string]string { @@ -1965,7 +2044,7 @@ func (SecretKeySelector) SwaggerDoc() map[string]string { var map_SecretList = map[string]string{ "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", } @@ -1998,7 +2077,7 @@ var map_SecretVolumeSource = map[string]string{ "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or it's keys must be defined", + "optional": "Specify whether the Secret or its keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -2010,6 +2089,7 @@ var map_SecurityContext = map[string]string{ "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -2033,9 +2113,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -2044,7 +2124,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", @@ -2056,7 +2136,7 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ServiceAccounts. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", } @@ -2077,7 +2157,7 @@ func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of services", } @@ -2087,7 +2167,7 @@ func (ServiceList) SwaggerDoc() map[string]string { var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", @@ -2122,6 +2202,7 @@ var map_ServiceSpec = map[string]string{ "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. 
Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2236,6 +2317,18 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { return map_TopologySelectorTerm } +var map_TopologySpreadConstraint = map[string]string{ + "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", + "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", +} + +func (TopologySpreadConstraint) SwaggerDoc() map[string]string { + return map_TopologySpreadConstraint +} + var map_TypedLocalObjectReference = map[string]string{ "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", @@ -2273,7 +2366,7 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. 
This field is beta in 1.15.", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -2310,12 +2403,12 @@ var map_VolumeSource = map[string]string{ "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", @@ -2359,4 +2452,15 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { return map_WeightedPodAffinityTerm } +var map_WindowsSecurityContextOptions = map[string]string{ + "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", + "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. 
This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.", +} + +func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { + return map_WindowsSecurityContextOptions +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 4497760d..3287fb51 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -33,4 +33,10 @@ const ( // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" + + // IsHeadlessService is added by Controller to an Endpoint denoting if its parent + // Service is Headless. The existence of this label can be used further by other + // controllers and kube-proxy to check if the Endpoint objects should be replicated when + // using Headless Services + IsHeadlessService = "service.kubernetes.io/headless" ) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 9a580c07..fd47019c 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -237,6 +237,11 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } + if in.ControllerExpandSecretRef != nil { + in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef + *out = new(SecretReference) + **out = **in + } return } @@ -480,7 +485,7 @@ func (in *ComponentStatus) DeepCopyObject() runtime.Object { func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ComponentStatus, len(*in)) @@ -605,7 +610,7 @@ func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ConfigMap, len(*in)) @@ -768,6 +773,11 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(Probe) (*in).DeepCopyInto(*out) } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle *out = new(Lifecycle) @@ -915,6 +925,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = *in in.State.DeepCopyInto(&out.State) in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + if in.Started != nil { + in, out := &in.Started, &out.Started + *out = new(bool) + **out = **in + } return } @@ -1161,7 +1176,7 @@ func (in *Endpoints) DeepCopyObject() runtime.Object { func (in *EndpointsList) 
DeepCopyInto(out *EndpointsList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Endpoints, len(*in)) @@ -1273,6 +1288,139 @@ func (in *EnvVarSource) DeepCopy() *EnvVarSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) { + *out = *in + in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer. +func (in *EphemeralContainer) DeepCopy() *EphemeralContainer { + if in == nil { + return nil + } + out := new(EphemeralContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon. +func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon { + if in == nil { + return nil + } + out := new(EphemeralContainerCommon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers. +func (in *EphemeralContainers) DeepCopy() *EphemeralContainers { + if in == nil { + return nil + } + out := new(EphemeralContainers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralContainers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Event) DeepCopyInto(out *Event) { *out = *in @@ -1318,7 +1466,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) @@ -1875,7 +2023,7 @@ func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LimitRange, len(*in)) @@ -1931,7 +2079,7 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -2056,7 +2204,7 @@ func (in *Namespace) DeepCopyInto(out *Namespace) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2078,11 +2226,28 @@ func (in *Namespace) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition. +func (in *NamespaceCondition) DeepCopy() *NamespaceCondition { + if in == nil { + return nil + } + out := new(NamespaceCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Namespace, len(*in)) @@ -2135,6 +2300,13 @@ func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2311,7 +2483,7 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Node, len(*in)) @@ -2465,6 +2637,11 @@ func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { *out = *in + if in.PodCIDRs != nil { + in, out := &in.PodCIDRs, &out.PodCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Taints != nil { in, out := &in.Taints, &out.Taints *out = make([]Taint, len(*in)) @@ -2690,7 +2867,7 @@ func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondi func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolumeClaim, len(*in)) @@ -2816,7 +2993,7 @@ func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVo func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolume, len(*in)) @@ -3293,11 +3470,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodIP) DeepCopyInto(out *PodIP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP. +func (in *PodIP) DeepCopy() *PodIP { + if in == nil { + return nil + } + out := new(PodIP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodList) DeepCopyInto(out *PodList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Pod, len(*in)) @@ -3449,6 +3642,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(SELinuxOptions) **out = **in } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -3537,6 +3735,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) @@ -3618,6 +3823,25 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(bool) **out = **in } + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(PreemptionPolicy) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3641,6 +3865,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PodIPs != nil { + in, out := &in.PodIPs, &out.PodIPs + *out = make([]PodIP, len(*in)) + copy(*out, *in) + } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime *out = (*in).DeepCopy() @@ -3659,6 +3888,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainerStatuses != nil { + in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3730,7 +3966,7 @@ func (in *PodTemplate) DeepCopyObject() runtime.Object { func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodTemplate, len(*in)) @@ -4042,7 +4278,7 @@ func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondi func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicationController, len(*in)) @@ -4198,7 +4434,7 @@ func (in *ResourceQuota) DeepCopyObject() runtime.Object { func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceQuota, len(*in)) @@ -4518,7 +4754,7 @@ func (in 
*SecretKeySelector) DeepCopy() *SecretKeySelector { func (in *SecretList) DeepCopyInto(out *SecretList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Secret, len(*in)) @@ -4643,6 +4879,11 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = new(SELinuxOptions) **out = **in } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -4785,7 +5026,7 @@ func (in *ServiceAccount) DeepCopyObject() runtime.Object { func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServiceAccount, len(*in)) @@ -4839,7 +5080,7 @@ func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjecti func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) @@ -4940,6 +5181,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(IPFamily) + **out = **in + } return } @@ -5151,6 +5397,27 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { *out = *in @@ -5471,3 +5738,34 @@ func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) { + *out = *in + if in.GMSACredentialSpecName != nil { + in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName + *out = new(string) + **out = **in + } + if in.GMSACredentialSpec != nil { + in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec + *out = new(string) + **out = **in + } + if in.RunAsUserName != nil { + in, out := &in.RunAsUserName, &out.RunAsUserName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions. 
+func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions { + if in == nil { + return nil + } + out := new(WindowsSecurityContextOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/doc.go b/vendor/k8s.io/api/discovery/v1alpha1/doc.go new file mode 100644 index 00000000..ffd6b0b5 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=discovery.k8s.io + +package v1alpha1 // import "k8s.io/api/discovery/v1alpha1" diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go new file mode 100644 index 00000000..34f6b28d --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -0,0 +1,1689 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{2} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{3} +} +func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + 
return fileDescriptor_772f83c5b34e07a5, []int{4} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint.TopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1alpha1.EndpointConditions") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1alpha1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSliceList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto", fileDescriptor_772f83c5b34e07a5) +} + +var fileDescriptor_772f83c5b34e07a5 = []byte{ + // 728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, + 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0xd4, 0x8e, 0xee, 0x22, 0xca, 0xbd, 0xd7, 0x8e, 0xc2, + 0x82, 0x48, 0x85, 0x31, 0xa9, 0x28, 0xaa, 0x60, 0x43, 0x8d, 0xca, 0x43, 0xe2, 0x11, 0x86, 0x2e, + 0x10, 0x62, 0xc1, 0xc4, 0x9e, 0x3a, 0x26, 0x89, 0xc7, 0xb2, 0x27, 0x91, 0xb2, 0xe3, 0x27, 0x20, + 0xf1, 0x77, 0x58, 0xb2, 0xe8, 0xb2, 0xcb, 0xae, 0x0c, 0x35, 0xff, 0xa2, 0x2b, 0x34, 0xe3, 0x57, + 0x4a, 0x78, 0x64, 0x37, 0xe7, 0x9b, 0xf3, 0x7d, 0xe7, 0x9c, 0x6f, 0xce, 0x80, 0x87, 0xe3, 0x83, + 0x18, 0xf9, 0xcc, 0x1a, 0xcf, 0x86, 0x34, 0x0a, 0x28, 0xa7, 0xb1, 0x35, 0xa7, 0x81, 0xcb, 0x22, + 0x2b, 0xbf, 0x20, 0xa1, 0x6f, 0xb9, 0x7e, 0xec, 0xb0, 0x39, 0x8d, 0x16, 0xd6, 0xbc, 0x4f, 0x26, + 0xe1, 0x88, 0xf4, 0x2d, 0x8f, 0x06, 0x34, 0x22, 0x9c, 0xba, 0x28, 0x8c, 0x18, 0x67, 0xf0, 0xff, + 0x2c, 0x1d, 0x91, 0xd0, 0x47, 0x65, 0x3a, 0x2a, 0xd2, 0xdb, 0x37, 0x3d, 0x9f, 0x8f, 0x66, 0x43, + 0xe4, 0xb0, 0xa9, 0xe5, 0x31, 0x8f, 0x59, 0x92, 0x35, 0x9c, 0x9d, 0xc8, 0x48, 0x06, 0xf2, 0x94, + 0xa9, 0xb5, 0xbb, 0x4b, 0xc5, 0x1d, 0x16, 0x51, 0x6b, 0xbe, 0x52, 0xb1, 0x7d, 0xbb, 0xca, 0x99, + 0x12, 0x67, 0xe4, 0x07, 0xa2, 0xbf, 0x70, 0xec, 0x09, 0x20, 0xb6, 0xa6, 0x94, 0x93, 0x5f, 0xb1, + 0xac, 0xdf, 0xb1, 0xa2, 0x59, 0xc0, 0xfd, 0x29, 0x5d, 0x21, 0xdc, 0xf9, 0x1b, 0x21, 0x76, 0x46, + 0x74, 0x4a, 0x7e, 0xe6, 0x75, 0x3f, 0xd7, 0x81, 0x76, 0x14, 0xb8, 0x21, 0xf3, 0x03, 0x0e, 0x77, + 0x81, 0x4e, 0x5c, 0x37, 0xa2, 0x71, 0x4c, 0xe3, 0x96, 0xd2, 0xa9, 0xf7, 0x74, 0xbb, 0x99, 0x26, + 0xa6, 0x7e, 0x58, 0x80, 0xb8, 0xba, 0x87, 0x14, 0x00, 0x87, 0x05, 0xae, 0xcf, 0x7d, 0x16, 0xc4, + 0xad, 0x8d, 0x8e, 0xd2, 0x6b, 0xec, 0xf5, 0xd1, 0x1f, 0xfd, 0x45, 0x45, 0xa5, 0x07, 0x25, 0xd1, + 0x86, 0xa7, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5e, 0x12, 0x86, 0x3d, 0xa0, 0x8d, 0x58, + 0xcc, 0x03, 0x32, 0xa5, 0xad, 0x7a, 0x47, 0xe9, 0xe9, 0xf6, 0x56, 0x9a, 0x98, 0xda, 0xe3, 0x1c, + 0xc3, 0xe5, 0x2d, 0x1c, 0x00, 0x9d, 0x93, 0xc8, 0xa3, 
0x1c, 0xd3, 0x93, 0xd6, 0xa6, 0xec, 0xe7, + 0xda, 0x72, 0x3f, 0xe2, 0x85, 0xd0, 0xbc, 0x8f, 0x5e, 0x0c, 0xdf, 0x53, 0x47, 0x24, 0xd1, 0x88, + 0x06, 0x0e, 0xcd, 0x46, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, + 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x0e, 0x88, 0x8e, 0x73, 0xde, 0x51, + 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x0f, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x40, 0xf3, 0x4a, + 0x32, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0x31, 0x2c, 0x16, 0x47, 0xf8, 0x0f, 0x50, 0xe7, + 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0xc1, 0xdd, 0x8d, 0x03, 0xa5, 0xbb, 0x0f, 0xe0, 0xaa, + 0xa7, 0xd0, 0x04, 0x6a, 0x44, 0x89, 0x9b, 0x69, 0x68, 0xb6, 0x9e, 0x26, 0xa6, 0x8a, 0x05, 0x80, + 0x33, 0xbc, 0xfb, 0x49, 0x01, 0x5b, 0x05, 0x6f, 0xc0, 0x22, 0x0e, 0xff, 0x03, 0x9b, 0xd2, 0x61, + 0x59, 0xd4, 0xd6, 0xd2, 0xc4, 0xdc, 0x7c, 0x2e, 0xdc, 0x95, 0x28, 0x7c, 0x04, 0x34, 0xb9, 0x2d, + 0x0e, 0x9b, 0x64, 0x2d, 0xd8, 0xbb, 0x62, 0x98, 0x41, 0x8e, 0x5d, 0x26, 0xe6, 0xbf, 0xab, 0x3f, + 0x01, 0x15, 0xd7, 0xb8, 0x24, 0x8b, 0x32, 0x21, 0x8b, 0xb8, 0x7c, 0x48, 0x35, 0x2b, 0x23, 0xca, + 0x63, 0x89, 0x76, 0xbf, 0x6e, 0x80, 0x66, 0xd1, 0xd5, 0xab, 0x89, 0xef, 0x50, 0xf8, 0x0e, 0x68, + 0xe2, 0x87, 0xb8, 0x84, 0x13, 0xd9, 0x5a, 0x63, 0xef, 0xd6, 0xd2, 0x03, 0x94, 0x8b, 0x8e, 0xc2, + 0xb1, 0x27, 0x80, 0x18, 0x89, 0xec, 0xea, 0x8d, 0x9f, 0x51, 0x4e, 0xaa, 0x05, 0xab, 0x30, 0x5c, + 0xaa, 0xc2, 0xfb, 0xa0, 0x91, 0xaf, 0xf4, 0xf1, 0x22, 0xa4, 0x72, 0x6d, 0x74, 0xdb, 0x48, 0x13, + 0xb3, 0x71, 0x58, 0xc1, 0x97, 0x57, 0x43, 0xbc, 0x4c, 0x81, 0xaf, 0x81, 0x4e, 0xf3, 0xa6, 0xc5, + 0x37, 0x10, 0x5b, 0x72, 0x7d, 0xcd, 0x2d, 0xb1, 0x77, 0xf2, 0xde, 0xf4, 0x02, 0x89, 0x71, 0x25, + 0x06, 0x07, 0x40, 0x15, 0xbe, 0xc4, 0xad, 0xba, 0x54, 0xdd, 0x5d, 0x53, 0x55, 0x38, 0x6a, 0x37, + 0x73, 0x65, 0x55, 0x44, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xc5, 0xe1, 0xa7, 0x7e, + 0xcc, 0xe1, 0xdb, 0x15, 0x97, 0xd1, 0x7a, 0x2e, 0x0b, 0xb6, 0xf4, 0xb8, 0xdc, 0xef, 0x02, 0x59, + 0x72, 0xf8, 0x25, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd6, 0x9c, 0x42, 0xb6, 0x57, 0x8d, + 0xf1, 0x44, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, 0x9d, 0x5f, + 0x18, 0xb5, 0x0f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, 0x28, 0xdf, + 0x52, 0x43, 0xf9, 0xf8, 0xdd, 0xa8, 0xbd, 0xd1, 0x0a, 0xcd, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x04, 0x9d, 0x1a, 0x33, 0x3e, 0x06, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Topology) > 0 { + keysForTopology := make([]string, 0, len(m.Topology)) + for k := range m.Topology { + keysForTopology = append(keysForTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.Topology[string(keysForTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForTopology[iNdEx]) + copy(dAtA[i:], keysForTopology[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i 
= encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Hostname != nil { + i -= len(*m.Hostname) + copy(dAtA[i:], *m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Hostname))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AddressType != nil { + i -= len(*m.AddressType) + copy(dAtA[i:], *m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AddressType))) + i-- + dAtA[i] = 0x22 + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err 
:= m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Topology) > 0 { + for k, v := range m.Topology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *EndpointSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AddressType != nil { + l = len(*m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 
{ + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForTopology := make([]string, 0, len(this.Topology)) + for k := range this.Topology { + keysForTopology = append(keysForTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + mapStringForTopology := "map[string]string{" + for _, k := range keysForTopology { + mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k]) + } + mapStringForTopology += "}" + s := strings.Join([]string{`&Endpoint{`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `Topology:` + mapStringForTopology + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + valueToStringGenerated(this.AddressType) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) 
+} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &v1.ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topology == nil { + m.Topology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Topology[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AddressType(dAtA[iNdEx:postIndex]) + m.AddressType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto new file mode 100644 index 00000000..f14f37fd --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -0,0 +1,148 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.discovery.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. This + // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers + // (e.g. kube-proxy) must handle different types of addresses in the context + // of their own capabilities. 
This must contain at least one address but no + // more than 100. + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + optional string hostname = 3; + + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + map topology = 5; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + optional bool ready = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +message EndpointPort { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass IANA_SVC_NAME validation: + // * must be no more than 15 characters long + // * may contain only [-a-z0-9] + // * must contain at least one letter [a-z] + // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Default is empty string. + optional string name = 1; + + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. 
+ // Default is IP + // +optional + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoint slices + // +listType=set + repeated EndpointSlice items = 2; +} + diff --git a/vendor/k8s.io/api/discovery/v1alpha1/register.go b/vendor/k8s.io/api/discovery/v1alpha1/register.go new file mode 100644 index 00000000..55b73f99 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EndpointSlice{}, + &EndpointSliceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types.go b/vendor/k8s.io/api/discovery/v1alpha1/types.go new file mode 100644 index 00000000..0de6082a --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -0,0 +1,144 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. + // Default is IP + // +optional + AddressType *AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +type AddressType string + +const ( + // AddressTypeIP represents an IP Address. + AddressTypeIP = AddressType("IP") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. This + // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers + // (e.g. kube-proxy) must handle different types of addresses in the context + // of their own capabilities. This must contain at least one address but no + // more than 100. + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. 
+ // +optional + Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` +} + +// EndpointConditions represents the current condition of an endpoint. +type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` +} + +// EndpointPort represents a Port used by an EndpointSlice +type EndpointPort struct { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass IANA_SVC_NAME validation: + // * must be no more than 15 characters long + // * may contain only [-a-z0-9] + // * must contain at least one letter [a-z] + // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of endpoint slices + // +listType=set + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..a524bcd6 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,85 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Endpoint = map[string]string{ + "": "Endpoint represents a single logical \"backend\" implementing a service.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. This allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers (e.g. kube-proxy) must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "conditions": "conditions contains information about the current status of the endpoint.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. 
This should match the corresponding node label.", +} + +func (Endpoint) SwaggerDoc() map[string]string { + return map_Endpoint +} + +var map_EndpointConditions = map[string]string{ + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", +} + +func (EndpointConditions) SwaggerDoc() map[string]string { + return map_EndpointConditions +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort represents a Port used by an EndpointSlice", + "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass IANA_SVC_NAME validation: * must be no more than 15 characters long * may contain only [-a-z0-9] * must contain at least one letter [a-z] * it must not start or end with a hyphen, nor contain adjacent hyphens Default is empty string.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSlice = map[string]string{ + "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "metadata": "Standard object's metadata.", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. Default is IP", + "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.", +} + +func (EndpointSlice) SwaggerDoc() map[string]string { + return map_EndpointSlice +} + +var map_EndpointSliceList = map[string]string{ + "": "EndpointSliceList represents a list of endpoint slices", + "metadata": "Standard list metadata.", + "items": "List of endpoint slices", +} + +func (EndpointSliceList) SwaggerDoc() map[string]string { + return map_EndpointSliceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go new file mode 100644 index 00000000..850cd205 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..e424fccf --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.AddressType != nil { + in, out := &in.AddressType, &out.AddressType + *out = new(AddressType) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index bb0c881b..0e9a8e78 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto - - It has these top-level messages: - Event - EventList - EventSeries -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,159 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{2} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_EventSeries proto.InternalMessageInfo func init() { proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) +} + +var fileDescriptor_4f97f691c32a5ac8 = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, + 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, + 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, + 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, + 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, + 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, + 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, + 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, + 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, + 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, + 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, + 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, + 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, + 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, + 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, + 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, + 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, + 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, + 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, + 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, + 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, + 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, + 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, + 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, + 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, + 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, + 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, + 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, + 0x73, 
0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, + 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, + 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, + 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, + 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, + 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, + 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, + 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, + 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, + 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, + 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, + 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, + 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, + 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, + 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, + 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, + 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, + 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, + 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, + 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, + 0x00, +} + func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -80,112 +204,139 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n2, err := m.EventTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + i-- + dAtA[i] = 0x78 + { + size, err := m.DeprecatedLastTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - if m.Series != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n3, err := m.Series.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x72 + { + size, err := m.DeprecatedFirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n3 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) - n4, err := m.Regarding.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x6a + { + size, err := m.DeprecatedSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x62 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x5a + i -= len(m.Note) + copy(dAtA[i:], m.Note) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i-- + dAtA[i] = 0x52 if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n5, err := m.Related.MarshalTo(dAtA[i:]) + } + { + size, err := m.Regarding.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n5 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) - i += copy(dAtA[i:], m.Note) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) - n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x42 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x3a + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x32 + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x2a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x22 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n6 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) - n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) - n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - dAtA[i] = 0x78 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -193,37 +344,46 @@ func (m *EventList) Marshal() (dAtA []byte, err error) { } func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -231,38 +391,51 @@ func (m *EventSeries) Marshal() (dAtA []byte, err error) { } func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ + i -= len(m.State) + copy(dAtA[i:], m.State) i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Event) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -302,6 +475,9 @@ func (m *Event) Size() (n int) { } func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -316,6 +492,9 @@ func (m *EventList) Size() (n int) { } func (m *EventSeries) Size() (n int) 
{ + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Count)) @@ -327,14 +506,7 @@ func (m *EventSeries) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -344,20 +516,20 @@ func (this *Event) String() string { return "nil" } s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `Action:` + fmt.Sprintf("%v", this.Action) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Regarding:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Regarding), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "v11.ObjectReference", 1) + `,`, `Note:` + fmt.Sprintf("%v", this.Note) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, - `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedSource), "EventSource", "v11.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedFirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedLastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, `}`, }, "") @@ -367,9 +539,14 @@ func (this *EventList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + 
repeatedStringForItems += "}" s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -380,7 +557,7 @@ func (this *EventSeries) String() string { } s := strings.Join([]string{`&EventSeries{`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, `}`, }, "") @@ -409,7 +586,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -437,7 +614,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -446,6 +623,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,7 +647,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -476,6 +656,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -497,7 +680,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -506,6 +689,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -530,7 +716,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -540,6 +726,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -559,7 +748,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -569,6 +758,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -588,7 +780,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) 
<< shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -598,6 +790,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -617,7 +812,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -627,6 +822,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -646,7 +844,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -655,6 +853,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -676,7 +877,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -685,11 +886,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Related == nil { - m.Related = &k8s_io_api_core_v1.ObjectReference{} + m.Related = &v11.ObjectReference{} } if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -709,7 +913,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -719,6 +923,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -738,7 +945,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -748,6 +955,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -767,7 +977,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -776,6 +986,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -797,7 +1010,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -806,6 +1019,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -827,7 +1043,7 @@ 
func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -836,6 +1052,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -857,7 +1076,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DeprecatedCount |= (int32(b) & 0x7F) << shift + m.DeprecatedCount |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -871,6 +1090,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -898,7 +1120,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -926,7 +1148,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -935,6 +1157,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -956,7 +1181,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -965,6 +1190,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -982,6 +1210,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1009,7 +1240,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1037,7 +1268,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1056,7 +1287,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1065,6 +1296,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1086,7 +1320,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1096,6 +1330,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1110,6 +1347,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1176,10 +1416,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1208,6 +1451,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1226,62 +1472,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 801 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, - 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, - 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, - 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, - 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, - 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, - 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, - 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, - 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, - 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, - 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, - 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, - 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, - 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, - 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, - 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, - 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, - 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, - 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, - 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, - 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, - 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, - 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, - 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, - 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 
0xd2, 0x44, 0xa9, 0xcd, 0xe6, - 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, - 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, - 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, - 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, - 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, - 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, - 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, - 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, - 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, - 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, - 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, - 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, - 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, - 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, - 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, - 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, - 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, - 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, - 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, - 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, - 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, - 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, - 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, - 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index b3e565e6..58f5aa42 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -98,7 +98,7 @@ message Event { // EventList is a list of Event objects. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -116,6 +116,7 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // Information whether this series is ongoing or finished. + // Deprecated. 
Planned removal for 1.18 optional string state = 3; } diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index dc48ddb0..0571fbb2 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -43,11 +43,11 @@ type Event struct { // ID of the controller instance, e.g. `kubelet-xyzf`. // +optional - ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` // What action was taken/failed regarding to the regarding object. // +optional - Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` // Why the action was taken. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` @@ -96,6 +96,7 @@ type EventSeries struct { // Time when last Event from the series was seen before last heartbeat. LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` // Information whether this series is ongoing or finished. + // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` } @@ -113,7 +114,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index a15672c1..639daca6 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of Event objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -63,7 +63,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", - "state": "Information whether this series is ongoing or finished.", + "state": "Information whether this series is ongoing or finished. Deprecated. 
Planned removal for 1.18", } func (EventSeries) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go index e52e142c..779ebaf6 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -70,7 +70,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 6802be28..65b47eab 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -17,87 +17,26 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto - - It has these top-level messages: - AllowedCSIDriver - AllowedFlexVolume - AllowedHostPath - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - FSGroupStrategyOptions - HTTPIngressPath - HTTPIngressRuleValue - HostPortRange - IDRange - IPBlock - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - ReplicationControllerDummy - RollbackConfig - RollingUpdateDaemonSet - RollingUpdateDeployment - RunAsGroupStrategyOptions - RunAsUserStrategyOptions - SELinuxStrategyOptions - Scale - ScaleSpec - ScaleStatus - SupplementalGroupsStrategyOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -110,244 +49,1602 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) 
XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{3} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{4} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{5} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{6} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{7} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{8} +} +func (m *DaemonSetUpdateStrategy) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{9} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{10} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } 
+func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{11} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{12} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{13} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{14} +} +func (m *DeploymentStatus) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{15} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{16} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{17} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{18} +} +func (m *HTTPIngressRuleValue) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{19} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{20} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{21} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{22} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_Ingress proto.InternalMessageInfo -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{23} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{24} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressList proto.InternalMessageInfo + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{25} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) 
Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{26} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{27} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{28} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{29} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{30} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + 
n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptor_cdc93917efc28165, []int{31} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) } +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo + func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} + return fileDescriptor_cdc93917efc28165, []int{32} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{33} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{34} +} +func (m *NetworkPolicyPeer) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{35} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{36} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{37} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{38} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{39} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{40} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) 
Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{41} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{42} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } 
-func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{45} + return fileDescriptor_cdc93917efc28165, []int{45} +} +func (m *ReplicationControllerDummy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerDummy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerDummy.Merge(m, src) +} +func (m *ReplicationControllerDummy) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerDummy) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerDummy.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerDummy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{46} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() } +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{47} +} +func (m *RollingUpdateDaemonSet) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptor_cdc93917efc28165, []int{48} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{49} + return fileDescriptor_cdc93917efc28165, []int{49} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{50} + return fileDescriptor_cdc93917efc28165, []int{50} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{51} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{52} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{53} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{54} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{55} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{55} + return fileDescriptor_cdc93917efc28165, []int{56} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) } +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + func init() { proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") @@ -362,6 +1659,7 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), 
"k8s.io.api.extensions.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") @@ -400,16 +1698,258 @@ func init() { proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165) +} + +var fileDescriptor_cdc93917efc28165 = []byte{ + // 3684 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4f, 0x6c, 0x1b, 0x47, + 0x77, 0xf7, 0x92, 0x94, 0x48, 0x3d, 0xfd, 0x1f, 0xc9, 0x12, 0x3f, 0x3b, 0x16, 0xfd, 0x6d, 0x00, + 0xd7, 0x49, 0x6d, 0x32, 0x76, 0x6c, 0x7f, 0xae, 0x8d, 0x7e, 0x89, 0x28, 0x59, 0xb6, 0x52, 0xfd, + 0x61, 0x86, 0x92, 0x1b, 0x04, 0x4d, 0x9a, 0x15, 0x39, 0xa2, 0xd6, 0x5a, 0xee, 0x6e, 0x76, 0x87, + 0x8a, 0x08, 0xf4, 0xd0, 0x43, 0x51, 0xa0, 0x40, 0x8b, 0xf6, 0x92, 0xb6, 0xc7, 0x06, 0x05, 0x7a, + 0x6a, 0xd1, 0xde, 0xda, 0x43, 0x10, 0xa0, 0x40, 0x0a, 0x18, 0x45, 0x5a, 0xe4, 0xd6, 0x9c, 0x84, + 0x46, 0x39, 0x15, 0x3d, 0xf5, 0x56, 0xf8, 0x50, 0x14, 0x33, 0x3b, 0xfb, 0x7f, 0x57, 0x5c, 0x29, + 0xb6, 0xd0, 0x00, 0xbd, 0x89, 0xf3, 0xde, 0xfb, 0xbd, 0x37, 0x33, 0x6f, 0xde, 0x7b, 0x33, 0xfb, + 0x04, 0x2b, 0xfb, 0xf7, 0xed, 0xaa, 0x6a, 0xd4, 0xf6, 0x7b, 0x3b, 0xc4, 0xd2, 0x09, 0x25, 0x76, + 0xed, 0x80, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x48, 0x89, 0x6e, 0xab, + 0x86, 0x6e, 0xd7, 0x0e, 0x6e, 0xed, 0x10, 0xaa, 0xdc, 0xaa, 0x75, 0x88, 0x4e, 0x2c, 0x85, 0x92, + 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x2b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, 0xcf, 0x5e, 0x15, + 0xec, 0x97, 0x6e, 0x76, 0x54, 0xba, 0xd7, 0xdb, 0xa9, 0xb6, 0x8c, 0x6e, 0xad, 0x63, 0x74, 0x8c, + 0x1a, 0x97, 0xda, 0xe9, 0xed, 0xf2, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0xbb, 0x24, 0x07, 0x94, + 0xb7, 0x0c, 0x8b, 0xd4, 0x0e, 0x62, 0x1a, 0x2f, 0xdd, 0xf1, 0x79, 0xba, 0x4a, 0x6b, 0x4f, 0xd5, + 0x89, 0xd5, 0xaf, 0x99, 0xfb, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, 0x52, 0xb5, 0x34, + 0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xf7, 0x06, 0x09, 0xd8, 0xad, 0x3d, 0xd2, 0x55, + 0x62, 0x72, 0x6f, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, 0x8a, 0x0a, 0xc9, + 0x77, 0x60, 0x6a, 0x51, 0xd3, 0x8c, 0xcf, 0x48, 0x7b, 0xa9, 0xb9, 0xba, 0x6c, 0xa9, 0x07, 0xc4, + 0x42, 
0x57, 0xa1, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0xae, 0x4a, 0xd7, 0x47, 0xea, 0x63, 0xcf, 0x8f, + 0x2a, 0x17, 0x8e, 0x8f, 0x2a, 0x85, 0x0d, 0xa5, 0x4b, 0x30, 0xa7, 0xc8, 0x0f, 0x61, 0x5a, 0x48, + 0xad, 0x68, 0xe4, 0xf0, 0xa9, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x83, 0xe1, 0x36, 0x07, 0x10, 0x82, + 0x13, 0x42, 0x70, 0xd8, 0x81, 0xc5, 0x82, 0x2a, 0xdb, 0x30, 0x29, 0x84, 0x9f, 0x18, 0x36, 0x6d, + 0x28, 0x74, 0x0f, 0xdd, 0x06, 0x30, 0x15, 0xba, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, 0x91, + 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, 0xd6, + 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, 0xf2, + 0xe7, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0x84, 0xa2, 0x4f, 0xa0, 0xc4, 0xb6, 0xab, + 0xad, 0x50, 0x85, 0x6b, 0x1b, 0xbd, 0xfd, 0x56, 0xd5, 0x77, 0x27, 0x6f, 0xf5, 0xaa, 0xe6, 0x7e, + 0x87, 0x0d, 0xd8, 0x55, 0xc6, 0x5d, 0x3d, 0xb8, 0x55, 0xdd, 0xdc, 0x79, 0x46, 0x5a, 0x74, 0x9d, + 0x50, 0xc5, 0xb7, 0xcf, 0x1f, 0xc3, 0x1e, 0x2a, 0xda, 0x80, 0x82, 0x6d, 0x92, 0x16, 0xb7, 0x6c, + 0xf4, 0xf6, 0x8d, 0xea, 0x89, 0xce, 0x5a, 0xf5, 0x2c, 0x6b, 0x9a, 0xa4, 0xe5, 0xaf, 0x38, 0xfb, + 0x85, 0x39, 0x0e, 0x7a, 0x0a, 0xc3, 0x36, 0x55, 0x68, 0xcf, 0x2e, 0xe7, 0x39, 0x62, 0x35, 0x33, + 0x22, 0x97, 0xf2, 0x37, 0xc3, 0xf9, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x39, 0x40, 0x1e, 0xef, 0x92, + 0xa1, 0xb7, 0x55, 0xaa, 0x1a, 0x3a, 0x7a, 0x00, 0x05, 0xda, 0x37, 0x5d, 0x17, 0xb8, 0xe6, 0x1a, + 0xb4, 0xd5, 0x37, 0xc9, 0x8b, 0xa3, 0xca, 0x5c, 0x5c, 0x82, 0x51, 0x30, 0x97, 0x41, 0x6b, 0x9e, + 0xa9, 0x39, 0x2e, 0x7d, 0x27, 0xac, 0xfa, 0xc5, 0x51, 0x25, 0xe1, 0xb0, 0x55, 0x3d, 0xa4, 0xb0, + 0x81, 0xe8, 0x00, 0x90, 0xa6, 0xd8, 0x74, 0xcb, 0x52, 0x74, 0xdb, 0xd1, 0xa4, 0x76, 0x89, 0x58, + 0x84, 0x37, 0xb3, 0x6d, 0x1a, 0x93, 0xa8, 0x5f, 0x12, 0x56, 0xa0, 0xb5, 0x18, 0x1a, 0x4e, 0xd0, + 0xc0, 0xbc, 0xd9, 0x22, 0x8a, 0x6d, 0xe8, 0xe5, 0x42, 0xd8, 0x9b, 0x31, 0x1f, 0xc5, 0x82, 0x8a, + 0xde, 0x80, 0x62, 0x97, 0xd8, 0xb6, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, 0xeb, + 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x97, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0xda, 0x14, 0xfd, 0x56, + 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, 0x70, + 0x1d, 0x86, 0x54, 0x4a, 0xba, 0x6c, 0x1f, 0xf2, 0xd7, 0x47, 0x6f, 0x5f, 0xcf, 0xea, 0x32, 0xf5, + 0x71, 0x01, 0x3a, 0xb4, 0xca, 0xc4, 0xb1, 0x83, 0x22, 0xff, 0x69, 0x21, 0x60, 0x3e, 0x73, 0x4d, + 0xf4, 0x11, 0x94, 0x6c, 0xa2, 0x91, 0x16, 0x35, 0x2c, 0x61, 0xfe, 0xdb, 0x19, 0xcd, 0x57, 0x76, + 0x88, 0xd6, 0x14, 0xa2, 0xf5, 0x31, 0x66, 0xbf, 0xfb, 0x0b, 0x7b, 0x90, 0xe8, 0x7d, 0x28, 0x51, + 0xd2, 0x35, 0x35, 0x85, 0x12, 0x71, 0x8e, 0x5e, 0x0f, 0x4e, 0x81, 0x79, 0x0e, 0x03, 0x6b, 0x18, + 0xed, 0x2d, 0xc1, 0xc6, 0x8f, 0x8f, 0xb7, 0x24, 0xee, 0x28, 0xf6, 0x60, 0xd0, 0x01, 0x4c, 0xf4, + 0xcc, 0x36, 0xe3, 0xa4, 0x2c, 0x0a, 0x76, 0xfa, 0xc2, 0x93, 0xee, 0x65, 0x5d, 0x9b, 0xed, 0x90, + 0x74, 0x7d, 0x4e, 0xe8, 0x9a, 0x08, 0x8f, 0xe3, 0x88, 0x16, 0xb4, 0x08, 0x93, 0x5d, 0x55, 0x67, + 0x71, 0xa9, 0xdf, 0x24, 0x2d, 0x43, 0x6f, 0xdb, 0xdc, 0xad, 0x86, 0xea, 0xf3, 0x02, 0x60, 0x72, + 0x3d, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x1e, 0x20, 0x77, 0x1a, 0x8f, 0x9d, 0x20, 0xae, 0x1a, 0x3a, + 0xf7, 0xb9, 0xbc, 0xef, 0xdc, 0x5b, 0x31, 0x0e, 0x9c, 0x20, 0x85, 0xd6, 0x60, 0xd6, 0x22, 0x07, + 0x2a, 0x9b, 0xe3, 0x13, 0xd5, 0xa6, 0x86, 0xd5, 0x5f, 0x53, 0xbb, 0x2a, 0x2d, 0x0f, 0x73, 0x9b, + 0xca, 0xc7, 0x47, 0x95, 0x59, 0x9c, 0x40, 0xc7, 0x89, 0x52, 0xf2, 0x9f, 0x0d, 0xc3, 0x64, 0x24, + 0xde, 0xa0, 0xa7, 0x30, 0xd7, 
0xea, 0x59, 0x16, 0xd1, 0xe9, 0x46, 0xaf, 0xbb, 0x43, 0xac, 0x66, + 0x6b, 0x8f, 0xb4, 0x7b, 0x1a, 0x69, 0x73, 0x47, 0x19, 0xaa, 0x2f, 0x08, 0x8b, 0xe7, 0x96, 0x12, + 0xb9, 0x70, 0x8a, 0x34, 0x5b, 0x05, 0x9d, 0x0f, 0xad, 0xab, 0xb6, 0xed, 0x61, 0xe6, 0x38, 0xa6, + 0xb7, 0x0a, 0x1b, 0x31, 0x0e, 0x9c, 0x20, 0xc5, 0x6c, 0x6c, 0x13, 0x5b, 0xb5, 0x48, 0x3b, 0x6a, + 0x63, 0x3e, 0x6c, 0xe3, 0x72, 0x22, 0x17, 0x4e, 0x91, 0x46, 0x77, 0x61, 0xd4, 0xd1, 0xc6, 0xf7, + 0x4f, 0x6c, 0xf4, 0x8c, 0x00, 0x1b, 0xdd, 0xf0, 0x49, 0x38, 0xc8, 0xc7, 0xa6, 0x66, 0xec, 0xd8, + 0xc4, 0x3a, 0x20, 0xed, 0xf4, 0x0d, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0x36, 0x35, 0xc7, 0x03, + 0x63, 0x53, 0x1b, 0x0e, 0x4f, 0x6d, 0x3b, 0x91, 0x0b, 0xa7, 0x48, 0x33, 0x3f, 0x76, 0x4c, 0x5e, + 0x3c, 0x50, 0x54, 0x4d, 0xd9, 0xd1, 0x48, 0xb9, 0x18, 0xf6, 0xe3, 0x8d, 0x30, 0x19, 0x47, 0xf9, + 0xd1, 0x63, 0x98, 0x76, 0x86, 0xb6, 0x75, 0xc5, 0x03, 0x29, 0x71, 0x90, 0x9f, 0x09, 0x90, 0xe9, + 0x8d, 0x28, 0x03, 0x8e, 0xcb, 0xa0, 0x07, 0x30, 0xd1, 0x32, 0x34, 0x8d, 0xfb, 0xe3, 0x92, 0xd1, + 0xd3, 0x69, 0x79, 0x84, 0xa3, 0x20, 0x76, 0x1e, 0x97, 0x42, 0x14, 0x1c, 0xe1, 0x44, 0x04, 0xa0, + 0xe5, 0x26, 0x1c, 0xbb, 0x0c, 0x3c, 0x3e, 0xde, 0xca, 0x1a, 0x03, 0xbc, 0x54, 0xe5, 0xd7, 0x00, + 0xde, 0x90, 0x8d, 0x03, 0xc0, 0xf2, 0x3f, 0x4b, 0x30, 0x9f, 0x12, 0x3a, 0xd0, 0x3b, 0xa1, 0x14, + 0xfb, 0xab, 0x91, 0x14, 0x7b, 0x39, 0x45, 0x2c, 0x90, 0x67, 0x75, 0x18, 0xb7, 0xd8, 0xac, 0xf4, + 0x8e, 0xc3, 0x22, 0x62, 0xe4, 0xdd, 0x01, 0xd3, 0xc0, 0x41, 0x19, 0x3f, 0xe6, 0x4f, 0x1f, 0x1f, + 0x55, 0xc6, 0x43, 0x34, 0x1c, 0x86, 0x97, 0xff, 0x3c, 0x07, 0xb0, 0x4c, 0x4c, 0xcd, 0xe8, 0x77, + 0x89, 0x7e, 0x1e, 0x35, 0xd4, 0x66, 0xa8, 0x86, 0xba, 0x39, 0x68, 0x7b, 0x3c, 0xd3, 0x52, 0x8b, + 0xa8, 0xdf, 0x8c, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x96, 0x87, 0x19, 0x9f, + 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x57, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, 0xea, + 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, 0xcb, + 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc5, 0x9a, 0xed, 0x2b, 0x09, + 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x91, 0xd9, 0x45, 0x53, 0xaa, + 0xb6, 0xff, 0x66, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, 0x43, + 0x9f, 0x4b, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0x83, 0x2a, 0x4e, 0xac, 0x74, 0xcc, 0x5a, 0xcd, + 0x6c, 0x96, 0xab, 0xb1, 0xba, 0x1d, 0xc3, 0x7a, 0xa4, 0x53, 0xab, 0xef, 0x6f, 0x72, 0x9c, 0x01, + 0x27, 0x18, 0x80, 0x14, 0x00, 0x4b, 0x60, 0x6e, 0x19, 0xe2, 0x20, 0xdf, 0xcc, 0x10, 0xf3, 0x98, + 0xc0, 0x92, 0xa1, 0xef, 0xaa, 0x1d, 0x3f, 0xec, 0x60, 0x0f, 0x08, 0x07, 0x40, 0x2f, 0x3d, 0x82, + 0xf9, 0x14, 0x6b, 0xd1, 0x14, 0xe4, 0xf7, 0x49, 0xdf, 0x59, 0x36, 0xcc, 0xfe, 0x44, 0xb3, 0x30, + 0x74, 0xa0, 0x68, 0x3d, 0x27, 0xfc, 0x8e, 0x60, 0xe7, 0xc7, 0x83, 0xdc, 0x7d, 0x49, 0xfe, 0x72, + 0x28, 0xe8, 0x3b, 0xbc, 0x62, 0xbe, 0xce, 0x2e, 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, + 0x63, 0xce, 0x85, 0xd5, 0x19, 0xc3, 0x1e, 0x35, 0x54, 0x5b, 0xe7, 0x5e, 0x6d, 0x6d, 0x9d, 0x7f, + 0x39, 0xb5, 0xf5, 0x6f, 0x43, 0xc9, 0x76, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, 0x2a, + 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8, 0x97, + 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x9b, 0xb4, 0x79, 0x6c, 0x2b, 0xf9, 0xf1, 0xa6, 0xc1, + 0x47, 0xb1, 0xa0, 0xa2, 0x8f, 0x42, 0x2e, 0x5b, 0x3a, 0x8b, 0xcb, 0x4e, 0xa4, 0xbb, 0x2b, 0xda, + 0x86, 0x79, 0xd3, 0x32, 0x3a, 0x16, 0xb1, 0xed, 0x65, 
0xa2, 0xb4, 0x35, 0x55, 0x27, 0xee, 0xfa, + 0x38, 0x15, 0xd1, 0xe5, 0xe3, 0xa3, 0xca, 0x7c, 0x23, 0x99, 0x05, 0xa7, 0xc9, 0xca, 0xcf, 0x0b, + 0x30, 0x15, 0xcd, 0x80, 0x29, 0x45, 0xaa, 0x74, 0xa6, 0x22, 0xf5, 0x46, 0xe0, 0x30, 0x38, 0x15, + 0x7c, 0xe0, 0x05, 0x27, 0x76, 0x20, 0x16, 0x61, 0x52, 0x44, 0x03, 0x97, 0x28, 0xca, 0x74, 0x6f, + 0xf7, 0xb7, 0xc3, 0x64, 0x1c, 0xe5, 0x47, 0x0f, 0x61, 0xdc, 0xe2, 0x75, 0xb7, 0x0b, 0xe0, 0xd4, + 0xae, 0x17, 0x05, 0xc0, 0x38, 0x0e, 0x12, 0x71, 0x98, 0x97, 0xd5, 0xad, 0x7e, 0x39, 0xea, 0x02, + 0x14, 0xc2, 0x75, 0xeb, 0x62, 0x94, 0x01, 0xc7, 0x65, 0xd0, 0x3a, 0xcc, 0xf4, 0xf4, 0x38, 0x94, + 0xe3, 0xca, 0x97, 0x05, 0xd4, 0xcc, 0x76, 0x9c, 0x05, 0x27, 0xc9, 0xa1, 0xdd, 0x50, 0x29, 0x3b, + 0xcc, 0xc3, 0xf3, 0xed, 0xcc, 0x07, 0x2f, 0x73, 0x2d, 0x9b, 0x50, 0x6e, 0x97, 0xb2, 0x96, 0xdb, + 0xf2, 0x3f, 0x4a, 0xc1, 0x24, 0xe4, 0x95, 0xc0, 0x83, 0x5e, 0x99, 0x62, 0x12, 0x81, 0xea, 0xc8, + 0x48, 0xae, 0x7e, 0xef, 0x9d, 0xaa, 0xfa, 0xf5, 0x93, 0xe7, 0xe0, 0xf2, 0xf7, 0x0b, 0x09, 0xe6, + 0x56, 0x9a, 0x8f, 0x2d, 0xa3, 0x67, 0xba, 0xe6, 0x6c, 0x9a, 0xce, 0xd2, 0xfc, 0x02, 0x0a, 0x56, + 0x4f, 0x73, 0xe7, 0xf1, 0xba, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0x66, 0x22, 0x52, 0xce, 0x24, + 0x98, 0x00, 0xda, 0x80, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0xd7, 0x06, 0x58, 0xbf, 0xba, + 0x8c, 0x19, 0x7b, 0xa0, 0xb0, 0xe1, 0xd2, 0x58, 0xa0, 0xc8, 0x7f, 0x24, 0xc1, 0xe4, 0x93, 0xad, + 0xad, 0xc6, 0xaa, 0xce, 0x4f, 0x34, 0x7f, 0x5b, 0xbd, 0x0a, 0x05, 0x53, 0xa1, 0x7b, 0xd1, 0x4c, + 0xcf, 0x68, 0x98, 0x53, 0xd0, 0x07, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, + 0xaf, 0x3b, 0x42, 0x7e, 0xf5, 0x24, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc3, 0x6c, 0xc0, 0x1c, 0xb6, + 0x1e, 0x4f, 0x59, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0x78, 0xcc, 0x8c, + 0x4c, 0xc9, 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x3a, 0x8c, 0xf3, 0x07, 0x65, 0xc3, + 0xa2, 0x7c, 0x59, 0xd0, 0x15, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, + 0x82, 0x8d, 0x73, 0xb2, 0x72, 0x28, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x31, 0x1b, 0x97, 0x1f, 0x43, + 0x51, 0x2c, 0x77, 0x10, 0x28, 0x7f, 0x32, 0x50, 0x3e, 0x01, 0x68, 0x13, 0x8a, 0xab, 0x8d, 0xba, + 0x66, 0x38, 0x55, 0x57, 0x4b, 0x6d, 0x5b, 0xd1, 0xbd, 0x58, 0x5a, 0x5d, 0xc6, 0x98, 0x53, 0x90, + 0x0c, 0xc3, 0xe4, 0xb0, 0x45, 0x4c, 0xca, 0x3d, 0x62, 0xa4, 0x0e, 0x6c, 0x97, 0x1f, 0xf1, 0x11, + 0x2c, 0x28, 0xf2, 0x1f, 0xe7, 0xa0, 0x28, 0x96, 0xe3, 0x1c, 0x6e, 0x61, 0x6b, 0xa1, 0x5b, 0xd8, + 0x9b, 0xd9, 0x5c, 0x23, 0xf5, 0x0a, 0xb6, 0x15, 0xb9, 0x82, 0xdd, 0xc8, 0x88, 0x77, 0xf2, 0xfd, + 0xeb, 0xef, 0x24, 0x98, 0x08, 0x3b, 0x25, 0xba, 0x0b, 0xa3, 0x2c, 0xe1, 0xa8, 0x2d, 0xb2, 0xe1, + 0xd7, 0xb9, 0xde, 0x23, 0x4c, 0xd3, 0x27, 0xe1, 0x20, 0x1f, 0xea, 0x78, 0x62, 0xcc, 0x8f, 0xc4, + 0xa4, 0xd3, 0x97, 0xb4, 0x47, 0x55, 0xad, 0xea, 0x7c, 0x5a, 0xa9, 0xae, 0xea, 0x74, 0xd3, 0x6a, + 0x52, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, 0xc8, 0xf2, 0x3f, 0x48, 0x30, 0x2a, 0x4c, + 0x3e, 0x87, 0x5b, 0xc5, 0x6f, 0x84, 0x6f, 0x15, 0xd7, 0x32, 0x1e, 0xf0, 0xe4, 0x2b, 0xc5, 0x5f, + 0xf9, 0xa6, 0xb3, 0x23, 0xcd, 0xbc, 0x7a, 0xcf, 0xb0, 0x69, 0xd4, 0xab, 0xd9, 0x61, 0xc4, 0x9c, + 0x82, 0x7a, 0x30, 0xa5, 0x46, 0x62, 0x80, 0x58, 0xda, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59, + 0xc0, 0x4f, 0x45, 0x29, 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x43, 0x61, 0x8f, 0x52, + 0x33, 0xe1, 0xbd, 0x7a, 0x40, 0xe4, 0xf1, 0x4d, 0x28, 0xf1, 0xd9, 0x6d, 0x6d, 0x35, 0x30, 0x87, + 0x92, 0xff, 0xc7, 0x5f, 0x8f, 0xa6, 0xe3, 0xe3, 0x5e, 0x3c, 0x95, 0xce, 0x12, 
0x4f, 0x47, 0x93, + 0x62, 0x29, 0x7a, 0x02, 0x79, 0xaa, 0x65, 0xbd, 0x16, 0x0a, 0xc4, 0xad, 0xb5, 0xa6, 0x1f, 0x90, + 0xb6, 0xd6, 0x9a, 0x98, 0x41, 0xa0, 0x4d, 0x18, 0x62, 0xd9, 0x87, 0x1d, 0xc1, 0x7c, 0xf6, 0x23, + 0xcd, 0xe6, 0xef, 0x3b, 0x04, 0xfb, 0x65, 0x63, 0x07, 0x47, 0xfe, 0x14, 0xc6, 0x43, 0xe7, 0x14, + 0x7d, 0x02, 0x63, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x7e, 0x1c, 0xb8, 0x96, + 0x74, 0xc3, 0x58, 0x0b, 0xf0, 0x89, 0x53, 0x3e, 0x2b, 0x94, 0x8c, 0x05, 0x69, 0x38, 0x84, 0x28, + 0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0x9f, 0x39, 0xf9, 0x64, 0xa4, 0x3e, 0xc2, 0x2c, + 0x64, 0xee, 0x67, 0x63, 0x67, 0x1c, 0xdd, 0x06, 0xb0, 0x49, 0xcb, 0x22, 0x94, 0x07, 0x83, 0x5c, + 0xf8, 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x4f, 0x12, 0x8c, 0x6f, 0x10, 0xfa, 0x99, + 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0xd8, 0xe2, 0x50, 0xb0, 0x7d, 0x6b, 0xc0, + 0xce, 0x84, 0xac, 0x4b, 0x0b, 0xb9, 0xf2, 0x57, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x47, 0x77, + 0x1b, 0x86, 0x4c, 0xc3, 0xa2, 0x6e, 0x22, 0x3e, 0x95, 0x42, 0x16, 0xc6, 0x02, 0xa9, 0x98, 0xc1, + 0x60, 0x07, 0x0d, 0xad, 0x41, 0x8e, 0x1a, 0xc2, 0x55, 0x4f, 0x87, 0x49, 0x88, 0x55, 0x07, 0x81, + 0x99, 0xdb, 0x32, 0x70, 0x8e, 0x1a, 0x6c, 0x23, 0xca, 0x21, 0xae, 0x60, 0xf0, 0x79, 0x45, 0x33, + 0xc0, 0x50, 0xd8, 0xb5, 0x8c, 0xee, 0x99, 0xe7, 0xe0, 0x6d, 0xc4, 0x8a, 0x65, 0x74, 0x31, 0xc7, + 0x92, 0xbf, 0x96, 0x60, 0x3a, 0xc4, 0x79, 0x0e, 0x81, 0xff, 0xfd, 0x70, 0xe0, 0xbf, 0x71, 0x9a, + 0x89, 0xa4, 0x84, 0xff, 0xaf, 0x73, 0x91, 0x69, 0xb0, 0x09, 0xa3, 0x5d, 0x18, 0x35, 0x8d, 0x76, + 0xf3, 0x25, 0x7c, 0x0e, 0x9c, 0x64, 0x79, 0xb3, 0xe1, 0x63, 0xe1, 0x20, 0x30, 0x3a, 0x84, 0x69, + 0x5d, 0xe9, 0x12, 0xdb, 0x54, 0x5a, 0xa4, 0xf9, 0x12, 0x1e, 0x48, 0x2e, 0xf2, 0xef, 0x0d, 0x51, + 0x44, 0x1c, 0x57, 0x82, 0xd6, 0xa1, 0xa8, 0x9a, 0xbc, 0x8e, 0x13, 0xb5, 0xcb, 0xc0, 0x2c, 0xea, + 0x54, 0x7d, 0x4e, 0x3c, 0x17, 0x3f, 0xb0, 0x8b, 0x21, 0xff, 0x75, 0xd4, 0x1b, 0x98, 0xff, 0xa1, + 0xc7, 0x50, 0xe2, 0x8d, 0x19, 0x2d, 0x43, 0x73, 0xbf, 0x0c, 0xb0, 0x9d, 0x6d, 0x88, 0xb1, 0x17, + 0x47, 0x95, 0xcb, 0x09, 0x8f, 0xbe, 0x2e, 0x19, 0x7b, 0xc2, 0x68, 0x03, 0x0a, 0xe6, 0x8f, 0xa9, + 0x60, 0x78, 0x92, 0xe3, 0x65, 0x0b, 0xc7, 0x91, 0x7f, 0x2f, 0x1f, 0x31, 0x97, 0xa7, 0xba, 0x67, + 0x2f, 0x6d, 0xd7, 0xbd, 0x8a, 0x29, 0x75, 0xe7, 0x77, 0xa0, 0x28, 0x32, 0xbc, 0x70, 0xe6, 0x5f, + 0x9c, 0xc6, 0x99, 0x83, 0x59, 0xcc, 0xbb, 0xb0, 0xb8, 0x83, 0x2e, 0x30, 0xfa, 0x18, 0x86, 0x89, + 0xa3, 0xc2, 0xc9, 0x8d, 0xf7, 0x4e, 0xa3, 0xc2, 0x8f, 0xab, 0x7e, 0xa1, 0x2a, 0xc6, 0x04, 0x2a, + 0x7a, 0x87, 0xad, 0x17, 0xe3, 0x65, 0x97, 0x40, 0xbb, 0x5c, 0xe0, 0xe9, 0xea, 0x8a, 0x33, 0x6d, + 0x6f, 0xf8, 0xc5, 0x51, 0x05, 0xfc, 0x9f, 0x38, 0x28, 0x21, 0xff, 0x8b, 0x04, 0xd3, 0x7c, 0x85, + 0x5a, 0x3d, 0x4b, 0xa5, 0xfd, 0x73, 0x4b, 0x4c, 0x4f, 0x43, 0x89, 0xe9, 0xce, 0x80, 0x65, 0x89, + 0x59, 0x98, 0x9a, 0x9c, 0xbe, 0x91, 0xe0, 0x62, 0x8c, 0xfb, 0x1c, 0xe2, 0xe2, 0x76, 0x38, 0x2e, + 0xbe, 0x75, 0xda, 0x09, 0xa5, 0xc4, 0xc6, 0xff, 0x9a, 0x4e, 0x98, 0x0e, 0x3f, 0x29, 0xb7, 0x01, + 0x4c, 0x4b, 0x3d, 0x50, 0x35, 0xd2, 0x11, 0x1f, 0xc1, 0x4b, 0x81, 0x16, 0x27, 0x8f, 0x82, 0x03, + 0x5c, 0xc8, 0x86, 0xb9, 0x36, 0xd9, 0x55, 0x7a, 0x1a, 0x5d, 0x6c, 0xb7, 0x97, 0x14, 0x53, 0xd9, + 0x51, 0x35, 0x95, 0xaa, 0xe2, 0xb9, 0x60, 0xa4, 0xfe, 0xd0, 0xf9, 0x38, 0x9d, 0xc4, 0xf1, 0xe2, + 0xa8, 0x72, 0x25, 0xe9, 0xeb, 0x90, 0xcb, 0xd2, 0xc7, 0x29, 0xd0, 0xa8, 0x0f, 0x65, 0x8b, 0x7c, + 0xda, 0x53, 0x2d, 0xd2, 0x5e, 0xb6, 0x0c, 0x33, 0xa4, 0x36, 0xcf, 0xd5, 0xfe, 0xfa, 0xf1, 0x51, + 0xa5, 
0x8c, 0x53, 0x78, 0x06, 0x2b, 0x4e, 0x85, 0x47, 0xcf, 0x60, 0x46, 0x11, 0xcd, 0x68, 0x41, + 0xad, 0xce, 0x29, 0xb9, 0x7f, 0x7c, 0x54, 0x99, 0x59, 0x8c, 0x93, 0x07, 0x2b, 0x4c, 0x02, 0x45, + 0x35, 0x28, 0x1e, 0xf0, 0xbe, 0x35, 0xbb, 0x3c, 0xc4, 0xf1, 0x59, 0x22, 0x28, 0x3a, 0xad, 0x6c, + 0x0c, 0x73, 0x78, 0xa5, 0xc9, 0x4f, 0x9f, 0xcb, 0xc5, 0x2e, 0x94, 0xac, 0x96, 0x14, 0x27, 0x9e, + 0xbf, 0x18, 0x97, 0xfc, 0xa8, 0xf5, 0xc4, 0x27, 0xe1, 0x20, 0x1f, 0xfa, 0x08, 0x46, 0xf6, 0xc4, + 0xab, 0x84, 0x5d, 0x2e, 0x66, 0x4a, 0xc2, 0xa1, 0x57, 0x8c, 0xfa, 0xb4, 0x50, 0x31, 0xe2, 0x0e, + 0xdb, 0xd8, 0x47, 0x44, 0x6f, 0x40, 0x91, 0xff, 0x58, 0x5d, 0xe6, 0xcf, 0x71, 0x25, 0x3f, 0xb6, + 0x3d, 0x71, 0x86, 0xb1, 0x4b, 0x77, 0x59, 0x57, 0x1b, 0x4b, 0xfc, 0x59, 0x38, 0xc2, 0xba, 0xda, + 0x58, 0xc2, 0x2e, 0x1d, 0x7d, 0x02, 0x45, 0x9b, 0xac, 0xa9, 0x7a, 0xef, 0xb0, 0x0c, 0x99, 0x3e, + 0x2a, 0x37, 0x1f, 0x71, 0xee, 0xc8, 0xc3, 0x98, 0xaf, 0x41, 0xd0, 0xb1, 0x0b, 0x8b, 0xf6, 0x60, + 0xc4, 0xea, 0xe9, 0x8b, 0xf6, 0xb6, 0x4d, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97, + 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x6f, 0x70, + 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x4f, 0x7d, 0xe2, 0x1d, 0xde, 0x23, 0xe3, 0x00, + 0x36, 0xfa, 0x43, 0x09, 0x90, 0xdd, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x4e, 0x15, 0x8d, 0x8f, 0xda, + 0xe5, 0x31, 0xae, 0xf2, 0xdd, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0x9e, 0xd7, 0xe3, 0xac, + 0x38, 0x41, 0x2f, 0xdb, 0xc4, 0x5d, 0x31, 0xeb, 0xf1, 0x4c, 0x9b, 0x98, 0xfc, 0xba, 0xe9, 0x6f, + 0xa2, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x85, 0x39, 0xb7, 0xc1, 0x12, 0x1b, 0x06, 0x5d, 0x51, 0x35, + 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x09, 0xee, 0x60, 0x5e, 0x97, 0x09, 0x4e, 0xe4, 0xc2, 0x29, + 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x38, 0xb1, 0x93, 0xeb, 0x45, 0xc7, 0x47, 0x76, 0x4b, 0xd1, 0x9c, + 0x2f, 0x0e, 0x93, 0x5c, 0xc1, 0xeb, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16, + 0xfa, 0x00, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0x35, 0x16, 0xf1, 0x52, 0x15, 0xa4, 0x4a, + 0x23, 0x0a, 0x53, 0x4a, 0xb8, 0xd5, 0xd5, 0x2e, 0x4f, 0x67, 0x7a, 0xf2, 0x8c, 0x74, 0xc8, 0xfa, + 0xcf, 0x1e, 0x11, 0x82, 0x8d, 0x63, 0x1a, 0xd0, 0xef, 0x00, 0x52, 0xa2, 0xdd, 0xb9, 0x76, 0x19, + 0x65, 0x4a, 0x74, 0xb1, 0xb6, 0x5e, 0xdf, 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x61, 0x05, 0xba, + 0x12, 0xe9, 0x28, 0xb6, 0xcb, 0xf3, 0x5c, 0x79, 0x2d, 0x9b, 0x72, 0x4f, 0x2e, 0xf0, 0x61, 0x25, + 0x8a, 0x88, 0xe3, 0x4a, 0xd0, 0x1a, 0xcc, 0x8a, 0xc1, 0x6d, 0xdd, 0x56, 0x76, 0x49, 0xb3, 0x6f, + 0xb7, 0xa8, 0x66, 0x97, 0x67, 0x78, 0x7c, 0xe7, 0x1f, 0xf7, 0x16, 0x13, 0xe8, 0x38, 0x51, 0x0a, + 0xbd, 0x0b, 0x53, 0xbb, 0x86, 0xb5, 0xa3, 0xb6, 0xdb, 0x44, 0x77, 0x91, 0x66, 0x39, 0xd2, 0x2c, + 0xdb, 0x87, 0x95, 0x08, 0x0d, 0xc7, 0xb8, 0x91, 0x0d, 0x17, 0x05, 0x72, 0xc3, 0x32, 0x5a, 0xeb, + 0x46, 0x4f, 0xa7, 0x4e, 0xd9, 0x77, 0xd1, 0x4b, 0xa3, 0x17, 0x17, 0x93, 0x18, 0x5e, 0x1c, 0x55, + 0xae, 0x26, 0x57, 0xf9, 0x3e, 0x13, 0x4e, 0xc6, 0x46, 0x26, 0x8c, 0x89, 0x3e, 0xf1, 0x25, 0x4d, + 0xb1, 0xed, 0x72, 0x99, 0x1f, 0xfd, 0x07, 0x83, 0x03, 0x9e, 0x27, 0x12, 0x3d, 0xff, 0x53, 0xc7, + 0x47, 0x95, 0xb1, 0x20, 0x03, 0x0e, 0x69, 0xe0, 0x7d, 0x41, 0xe2, 0x6b, 0xd4, 0xf9, 0xf4, 0x56, + 0x9f, 0xae, 0x2f, 0xc8, 0x37, 0xed, 0xa5, 0xf5, 0x05, 0x05, 0x20, 0x4f, 0x7e, 0x97, 0xfe, 0xcf, + 0x1c, 0xcc, 0xf8, 0xcc, 0x99, 0xfb, 0x82, 0x12, 0x44, 0xfe, 0xbf, 0xbf, 0x3a, 0x5b, 0xaf, 0x8e, + 0xbf, 0x74, 0xff, 0xf7, 0x7a, 0x75, 0x7c, 0xdb, 0x52, 0x6e, 0x0f, 0x7f, 0x9b, 0x0b, 0x4e, 0xe0, + 0x94, 0x0d, 0x23, 0x2f, 0xa1, 
0xc5, 0xf8, 0x27, 0xd7, 0x73, 0x22, 0x7f, 0x93, 0x87, 0xa9, 0xe8, + 0x69, 0x0c, 0xf5, 0x15, 0x48, 0x03, 0xfb, 0x0a, 0x1a, 0x30, 0xbb, 0xdb, 0xd3, 0xb4, 0x3e, 0x9f, + 0x43, 0xa0, 0xb9, 0xc0, 0xf9, 0x2e, 0xf8, 0x9a, 0x90, 0x9c, 0x5d, 0x49, 0xe0, 0xc1, 0x89, 0x92, + 0xf1, 0x36, 0x83, 0xc2, 0x8f, 0x6d, 0x33, 0x18, 0x3a, 0x43, 0x9b, 0x41, 0x72, 0xa7, 0x46, 0xfe, + 0x4c, 0x9d, 0x1a, 0x67, 0xe9, 0x31, 0x48, 0x08, 0x62, 0x03, 0xfb, 0x65, 0x5f, 0x83, 0x4b, 0x42, + 0x8c, 0xf2, 0xde, 0x01, 0x9d, 0x5a, 0x86, 0xa6, 0x11, 0x6b, 0xb9, 0xd7, 0xed, 0xf6, 0xe5, 0x5f, + 0xc2, 0x44, 0xb8, 0x2b, 0xc6, 0xd9, 0x69, 0xa7, 0x31, 0x47, 0x7c, 0x9d, 0x0d, 0xec, 0xb4, 0x33, + 0x8e, 0x3d, 0x0e, 0xf9, 0xf7, 0x25, 0x98, 0x4b, 0xee, 0x7e, 0x45, 0x1a, 0x4c, 0x74, 0x95, 0xc3, + 0x60, 0x47, 0xb2, 0x74, 0xc6, 0x77, 0x33, 0xde, 0x0e, 0xb1, 0x1e, 0xc2, 0xc2, 0x11, 0x6c, 0xf9, + 0x07, 0x09, 0xe6, 0x53, 0x1a, 0x11, 0xce, 0xd7, 0x12, 0xf4, 0x21, 0x94, 0xba, 0xca, 0x61, 0xb3, + 0x67, 0x75, 0xc8, 0x99, 0x5f, 0x0a, 0xf9, 0x71, 0x5f, 0x17, 0x28, 0xd8, 0xc3, 0x93, 0xff, 0x52, + 0x82, 0x9f, 0xa5, 0x5e, 0xa4, 0xd0, 0xbd, 0x50, 0xcf, 0x84, 0x1c, 0xe9, 0x99, 0x40, 0x71, 0xc1, + 0x57, 0xd4, 0x32, 0xf1, 0x85, 0x04, 0xe5, 0xb4, 0x9b, 0x25, 0xba, 0x1b, 0x32, 0xf2, 0xe7, 0x11, + 0x23, 0xa7, 0x63, 0x72, 0xaf, 0xc8, 0xc6, 0x7f, 0x95, 0xe0, 0xf2, 0x09, 0x15, 0x9a, 0x77, 0x81, + 0x21, 0xed, 0x20, 0x17, 0x7f, 0xd4, 0x16, 0x5f, 0xc4, 0xfc, 0x0b, 0x4c, 0x02, 0x0f, 0x4e, 0x95, + 0x46, 0xdb, 0x30, 0x2f, 0x6e, 0x4f, 0x51, 0x9a, 0x28, 0x3e, 0x78, 0x6b, 0xd9, 0x72, 0x32, 0x0b, + 0x4e, 0x93, 0x95, 0xff, 0x46, 0x82, 0xb9, 0xe4, 0x27, 0x03, 0xf4, 0x76, 0x68, 0xc9, 0x2b, 0x91, + 0x25, 0x9f, 0x8c, 0x48, 0x89, 0x05, 0xff, 0x18, 0x26, 0xc4, 0xc3, 0x82, 0x80, 0x11, 0xce, 0x2c, + 0x27, 0xe5, 0x17, 0x01, 0xe1, 0x96, 0xb7, 0xfc, 0x98, 0x84, 0xc7, 0x70, 0x04, 0x4d, 0xfe, 0x83, + 0x1c, 0x0c, 0x35, 0x5b, 0x8a, 0x46, 0xce, 0xa1, 0xba, 0x7d, 0x2f, 0x54, 0xdd, 0x0e, 0xfa, 0xa7, + 0x2d, 0x6e, 0x55, 0x6a, 0x61, 0x8b, 0x23, 0x85, 0xed, 0x9b, 0x99, 0xd0, 0x4e, 0xae, 0x69, 0x7f, + 0x0d, 0x46, 0x3c, 0xa5, 0xa7, 0x4b, 0xb5, 0xf2, 0x5f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, 0x94, 0x89, + 0x7a, 0x37, 0x54, 0x9d, 0xe4, 0x33, 0x3c, 0xe3, 0x04, 0x74, 0x55, 0xdd, 0x7a, 0xc4, 0x69, 0x3a, + 0xf6, 0xdb, 0x4c, 0xe3, 0x65, 0xca, 0x2f, 0x61, 0x82, 0x2a, 0x56, 0x87, 0x50, 0xef, 0xb3, 0x46, + 0x9e, 0xfb, 0xa2, 0xd7, 0xfd, 0xbe, 0x15, 0xa2, 0xe2, 0x08, 0xf7, 0xa5, 0x87, 0x30, 0x1e, 0x52, + 0x76, 0xaa, 0x9e, 0xe1, 0xbf, 0x97, 0xe0, 0xe7, 0x03, 0x9f, 0x82, 0x50, 0x3d, 0x74, 0x48, 0xaa, + 0x91, 0x43, 0xb2, 0x90, 0x0e, 0xf0, 0xea, 0x7a, 0xcf, 0xea, 0x37, 0x9f, 0x7f, 0xbf, 0x70, 0xe1, + 0xdb, 0xef, 0x17, 0x2e, 0x7c, 0xf7, 0xfd, 0xc2, 0x85, 0xdf, 0x3d, 0x5e, 0x90, 0x9e, 0x1f, 0x2f, + 0x48, 0xdf, 0x1e, 0x2f, 0x48, 0xdf, 0x1d, 0x2f, 0x48, 0xff, 0x7e, 0xbc, 0x20, 0xfd, 0xc9, 0x0f, + 0x0b, 0x17, 0x3e, 0x2c, 0x0a, 0xb8, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x62, 0xda, 0xf9, + 0x07, 0x3e, 0x00, 0x00, +} + func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -417,21 +1957,27 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { } func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Name) + 
copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -439,21 +1985,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -461,29 +2013,35 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -491,41 +2049,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, 
nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -533,41 +2102,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -575,37 +2155,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -613,54 +2202,65 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n6, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n6 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n7, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n8, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -668,58 +2268,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -727,31 +2334,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n9, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -759,41 +2374,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n11, err := 
m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n12, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -801,49 +2427,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n13, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n14, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -851,37 +2490,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, 
error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n15, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -889,51 +2537,61 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n16, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - 
i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -941,79 +2599,92 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n17, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n18, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n19, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x38 - i++ + i-- if m.Paused { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n20, err := m.RollbackTo.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n20 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1021,52 +2692,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) 
(int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1074,31 +2752,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1106,33 +2792,41 @@ func (m 
*FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1140,29 +2834,37 @@ func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n22, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1170,53 +2872,65 @@ func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1224,23 +2938,28 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1248,36 +2967,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for _, s := range m.Except { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1285,41 +3004,52 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { } func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1327,29 +3057,37 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { } func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n26, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1357,37 +3095,46 @@ func (m *IngressList) Marshal() (dAtA []byte, err error) { } func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m 
*IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1395,29 +3142,37 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { } func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n28, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n28 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1425,27 +3180,34 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n29, err := m.HTTP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1453,51 +3215,62 @@ func (m *IngressSpec) Marshal() (dAtA []byte, err error) { } func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n30, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n30 } if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1505,25 +3278,32 @@ func (m *IngressStatus) Marshal() (dAtA []byte, err error) { } func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n31, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n31 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1531,36 +3311,36 @@ func (m *IngressTLS) Marshal() (dAtA []byte, err error) { } func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 if len(m.Hosts) > 0 { - for _, s := range m.Hosts { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1568,33 +3348,42 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n33, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1602,41 +3391,50 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1644,41 +3442,50 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1686,37 +3493,46 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n34, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1724,47 +3540,58 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n35, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 + i-- + dAtA[i] = 0x1a } if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n36, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 + i-- + dAtA[i] = 0x12 } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n37, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- + dAtA[i] = 0xa } - return i, nil + return 
len(dAtA) - i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1772,33 +3599,41 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n38, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1806,64 +3641,69 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n39, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Egress) > 0 { - for _, msg := range m.Egress { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += 
copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1871,33 +3711,42 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n40, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n40 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n41, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n41 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1905,37 +3754,46 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n42, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1943,288 +3801,283 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n43, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } } - i += n43 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n44, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } } - i += n44 - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.SupplementalGroups.Size())) - n45, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } } - i += n45 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n46, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } } - i += n46 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ + i-- dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x78 } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - 
for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x3a } } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if m.RunAsGroup != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) - n47, err := m.RunAsGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 } - i += n47 } - if len(m.AllowedCSIDrivers) > 0 { - for _, msg := range m.AllowedCSIDrivers { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return i, nil + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2232,41 +4085,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa 
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n49, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n49 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n50, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n50 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2274,41 +4138,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n51, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n51 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2316,37 +4191,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n52, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n52 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2354,43 +4238,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n53, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n53 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n54, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n54 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2398,44 +4291,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2443,17 +4343,22 @@ func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { } func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2461,20 +4366,25 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2482,27 +4392,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n55 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2510,37 +4427,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n56 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n57, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n57 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2548,33 +4474,41 @@ func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2582,33 +4516,80 @@ func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2616,31 +4597,39 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n58, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n58 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2648,41 +4637,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n60, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n60 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n61 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2690,20 +4690,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2711,46 +4716,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2758,39 +4771,52 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2799,6 +4825,9 @@ func (m *AllowedCSIDriver) Size() (n int) { } func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Driver) @@ -2807,6 +4836,9 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.PathPrefix) @@ -2816,6 +4848,9 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2828,6 +4863,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2844,6 +4882,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2858,6 +4899,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -2877,6 +4921,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -2900,6 +4947,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2912,6 +4962,9 @@ func (m 
*DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2924,6 +4977,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2942,6 +4998,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2956,6 +5015,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2974,6 +5036,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3003,6 +5068,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -3024,6 +5092,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3036,6 +5107,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3050,6 +5124,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -3060,6 +5137,9 @@ func (m *HTTPIngressPath) Size() (n int) { } func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -3072,6 +5152,9 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3080,6 +5163,9 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3088,6 +5174,9 @@ func (m *IDRange) Size() (n int) { } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -3102,6 +5191,9 @@ func (m *IPBlock) Size() (n int) { } func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3114,6 +5206,9 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ServiceName) @@ -3124,6 +5219,9 @@ func (m *IngressBackend) Size() (n int) { } func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3138,6 +5236,9 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Host) @@ -3148,6 +5249,9 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HTTP != nil { @@ -3158,6 +5262,9 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Backend != nil { @@ -3180,6 +5287,9 @@ func (m *IngressSpec) Size() (n int) { } func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LoadBalancer.Size() @@ -3188,6 
+5298,9 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Hosts) > 0 { @@ -3202,6 +5315,9 @@ func (m *IngressTLS) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3212,6 +5328,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3230,6 +5349,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3248,6 +5370,9 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3262,6 +5387,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -3280,6 +5408,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -3294,6 +5425,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -3320,6 +5454,9 @@ func (m *NetworkPolicySpec) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3330,6 +5467,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3344,6 +5484,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -3435,10 +5578,17 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3451,6 +5601,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3467,6 +5620,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3481,6 +5637,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3497,6 +5656,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3514,12 +5676,18 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *ReplicationControllerDummy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -3527,6 +5695,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if 
m.MaxUnavailable != nil { @@ -3537,6 +5708,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -3551,6 +5725,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3565,6 +5742,9 @@ func (m *RunAsGroupStrategyOptions) Size() (n int) { } func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3578,7 +5758,29 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3591,6 +5793,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3603,6 +5808,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3610,6 +5818,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3627,6 +5838,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3641,14 +5855,7 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -3689,7 +5896,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3703,7 +5910,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + 
fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3714,9 +5921,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3726,8 +5938,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, @@ -3740,6 +5952,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -3750,7 +5967,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -3761,7 +5978,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -3771,7 +5988,7 @@ func (this *Deployment) String() string { return "nil" } s := 
strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3787,8 +6004,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3797,9 +6014,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3832,13 +6054,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + 
strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -3848,13 +6070,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -3867,7 +6094,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -3876,9 +6103,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -3898,8 +6130,13 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s @@ -3942,7 +6179,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, 
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3955,7 +6192,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3964,9 +6201,14 @@ func (this *IngressList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3987,7 +6229,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -3996,10 +6238,20 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -4009,7 +6261,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4030,7 +6282,7 @@ func (this 
*NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4040,9 +6292,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -4051,9 +6313,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -4062,9 +6334,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4074,9 +6351,9 @@ func 
(this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -4087,7 +6364,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4096,10 +6373,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -4110,7 +6397,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4120,9 +6407,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + 
repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4131,6 +6423,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -4138,7 +6450,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -4148,13 +6460,14 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -4164,7 +6477,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4178,7 +6491,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -4189,9 +6502,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4202,8 +6520,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + 
strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -4213,13 +6531,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -4248,7 +6571,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4258,8 +6581,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4268,9 +6591,14 @@ func (this *RunAsGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4279,9 +6607,25 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := 
"[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, `}`, }, "") return s @@ -4292,7 +6636,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -4302,7 +6646,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4345,9 +6689,14 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4375,7 +6724,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4403,7 +6752,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4413,6 +6762,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4427,6 +6779,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF 
} @@ -4454,7 +6809,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4482,7 +6837,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4492,6 +6847,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4506,6 +6864,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4533,7 +6894,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4561,7 +6922,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4571,6 +6932,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4590,7 +6954,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4605,6 +6969,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4632,7 +6999,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4660,7 +7027,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4669,6 +7036,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4690,7 +7060,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4699,6 +7069,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4720,7 +7093,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4729,6 +7102,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4745,6 +7121,9 
@@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4772,7 +7151,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4800,7 +7179,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4810,6 +7189,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4829,7 +7211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4839,6 +7221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4858,7 +7243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4867,6 +7252,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4888,7 +7276,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4898,6 +7286,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4917,7 +7308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4927,6 +7318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4941,6 +7335,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4968,7 +7365,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4996,7 +7393,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5005,6 +7402,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5026,7 +7426,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5035,6 +7435,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5052,6 +7455,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5079,7 +7485,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5107,7 +7513,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5116,11 +7522,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5140,7 +7549,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5149,6 +7558,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5170,7 +7582,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5179,6 +7591,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5200,7 +7615,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5219,7 +7634,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TemplateGeneration |= (int64(b) & 0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5238,7 +7653,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5253,6 +7668,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5280,7 +7698,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5308,7 +7726,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5327,7 +7745,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5346,7 +7764,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5365,7 +7783,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5384,7 +7802,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5403,7 +7821,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5422,7 +7840,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5441,7 +7859,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5460,7 +7878,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5480,7 +7898,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5489,6 +7907,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5506,6 +7927,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5533,7 +7957,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5561,7 +7985,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5571,6 +7995,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5590,7 +8017,7 @@ func (m 
*DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5599,6 +8026,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5618,6 +8048,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5645,7 +8078,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5673,7 +8106,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5682,6 +8115,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5703,7 +8139,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5712,6 +8148,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5733,7 +8172,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5742,6 +8181,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5758,6 +8200,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5785,7 +8230,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5813,7 +8258,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5823,6 +8268,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5842,7 +8290,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5852,6 +8300,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -5871,7 +8322,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5881,6 +8332,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5900,7 +8354,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5910,6 +8364,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5929,7 +8386,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5938,6 +8395,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5959,7 +8419,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5968,6 +8428,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5984,6 +8447,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6011,7 +8477,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6039,7 +8505,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6047,7 +8513,10 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6069,7 +8538,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6078,6 +8547,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6095,6 +8567,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } 
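[Editor's note on the regenerated code above and below: the newer code generator consistently rewrites the varint accumulation from `wire |= (uint64(b) & 0x7F) << shift` to `wire |= uint64(b&0x7F) << shift`, adds negative-index guards (`if postIndex < 0` and `if (iNdEx + skippy) < 0`) before any slice of the input buffer, shortens the fully qualified type references (e.g. `v1.ObjectMeta`, `v11.PodTemplateSpec`, `intstr.IntOrString`), and builds repeated fields in String() via local `repeatedStringFor*` helpers. The following is a minimal, illustrative sketch of the length-prefix pattern those guards protect — the function name `readLengthDelimited` and the example buffer are assumptions for illustration, not the library's actual generated API.]

// Sketch (assumption: simplified stand-in, not the generated code itself)
// of how the regenerated Unmarshal functions read a length-delimited field:
// decode a varint length, then reject values whose end index would wrap
// negative before slicing the buffer.
package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// readLengthDelimited decodes a varint length at dAtA[iNdEx:] and returns
// the enclosed bytes plus the index just past them.
func readLengthDelimited(dAtA []byte, iNdEx int) ([]byte, int, error) {
	l := len(dAtA)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errors.New("proto: integer overflow")
		}
		if iNdEx >= l {
			return nil, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift // same form as the regenerated code
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // new guard: iNdEx+msglen overflowed into a negative index
		return nil, 0, errInvalidLength
	}
	if postIndex > l {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x03 = length 3, followed by three payload bytes.
	buf := []byte{0x03, 'f', 'o', 'o'}
	payload, next, err := readLengthDelimited(buf, 0)
	fmt.Println(string(payload), next, err) // foo 4 <nil>
}

[The point of the added `postIndex < 0` / `(iNdEx + skippy) < 0` checks appears to be hardening against hostile lengths: on malicious input the sum can wrap negative, and without the guard the subsequent slice or index comparison would misbehave. The regenerated k8s.io/api code presumably picks this up from the newer gogo/protobuf generator used by the 0.16.x codegen.]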
@@ -6122,7 +8597,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6150,7 +8625,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6160,6 +8635,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6179,7 +8657,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6188,6 +8666,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6208,7 +8689,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6225,7 +8706,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6235,6 +8716,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6251,7 +8735,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6261,6 +8745,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6297,7 +8784,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6306,6 +8793,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6322,6 +8812,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6349,7 +8842,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6377,7 +8870,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6397,7 +8890,7 @@ func (m *DeploymentSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6406,11 +8899,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6430,7 +8926,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6439,6 +8935,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6460,7 +8959,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6469,6 +8968,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6490,7 +8992,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6509,7 +9011,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6529,7 +9031,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6549,7 +9051,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6558,6 +9060,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6582,7 +9087,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6597,6 +9102,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6624,7 +9132,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6652,7 +9160,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6671,7 +9179,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) 
& 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6690,7 +9198,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6709,7 +9217,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6728,7 +9236,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6747,7 +9255,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6756,6 +9264,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6778,7 +9289,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6797,7 +9308,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6812,6 +9323,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6839,7 +9353,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6867,7 +9381,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6877,6 +9391,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6896,7 +9413,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6905,6 +9422,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6924,6 +9444,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6951,7 +9474,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6979,7 +9502,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6989,6 +9512,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7008,7 +9534,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7017,6 +9543,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7034,6 +9563,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7061,7 +9593,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7089,7 +9621,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7099,6 +9631,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7118,7 +9653,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7127,6 +9662,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7143,6 +9681,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7170,7 +9711,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7198,7 +9739,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7207,6 +9748,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7224,6 +9768,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7251,7 +9798,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + 
wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7279,7 +9826,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7298,7 +9845,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7312,6 +9859,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7339,7 +9889,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7367,7 +9917,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7386,7 +9936,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7400,6 +9950,9 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7427,7 +9980,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7455,7 +10008,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7465,6 +10018,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7484,7 +10040,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7494,6 +10050,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7508,6 +10067,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7535,7 +10097,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7563,7 +10125,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7572,6 +10134,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7593,7 +10158,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7602,6 +10167,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7623,7 +10191,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7632,6 +10200,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7648,6 +10219,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7675,7 +10249,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7703,7 +10277,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7713,6 +10287,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7732,7 +10309,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7741,6 +10318,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7757,6 +10337,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7784,7 +10367,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7812,7 +10395,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7821,6 +10404,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7842,7 +10428,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7851,6 +10437,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7868,6 +10457,9 @@ func (m *IngressList) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7895,7 +10487,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7923,7 +10515,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7933,6 +10525,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7952,7 +10547,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7961,6 +10556,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7977,6 +10575,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8004,7 +10605,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8032,7 +10633,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8041,6 +10642,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8060,6 +10664,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8087,7 +10694,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8115,7 +10722,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8124,6 +10731,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8148,7 +10758,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8157,6 +10767,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8179,7 
+10792,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8188,6 +10801,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8205,6 +10821,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8232,7 +10851,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8260,7 +10879,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8269,6 +10888,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8285,6 +10907,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8312,7 +10937,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8340,7 +10965,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8350,6 +10975,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8369,7 +10997,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8379,6 +11007,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8393,6 +11024,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8420,7 +11054,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8448,7 +11082,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8457,6 +11091,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -8478,7 +11115,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8487,6 +11124,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8503,6 +11143,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8530,7 +11173,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8558,7 +11201,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8567,6 +11210,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8589,7 +11235,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8598,6 +11244,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8615,6 +11264,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8642,7 +11294,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8670,7 +11322,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8679,6 +11331,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8701,7 +11356,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8710,6 +11365,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8727,6 +11385,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8754,7 
+11415,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8782,7 +11443,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8791,6 +11452,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8812,7 +11476,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8821,6 +11485,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8838,6 +11505,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8865,7 +11535,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8893,7 +11563,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8902,11 +11572,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8926,7 +11599,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8935,11 +11608,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8959,7 +11635,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8968,6 +11644,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8987,6 +11666,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + 
return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9014,7 +11696,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9042,7 +11724,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9052,6 +11734,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9072,7 +11757,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9081,11 +11766,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9100,6 +11788,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9127,7 +11818,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9155,7 +11846,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9164,6 +11855,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9185,7 +11879,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9194,6 +11888,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9216,7 +11913,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9225,6 +11922,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9247,7 +11947,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9257,6 +11957,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9271,6 +11974,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9298,7 +12004,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9326,7 +12032,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9335,6 +12041,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9356,7 +12065,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9365,6 +12074,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9381,6 +12093,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9408,7 +12123,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9436,7 +12151,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9445,6 +12160,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9466,7 +12184,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9475,6 +12193,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9492,6 +12213,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9519,7 +12243,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9547,7 +12271,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -9567,7 +12291,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9577,6 +12301,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9596,7 +12323,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9606,6 +12333,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9625,7 +12355,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9635,6 +12365,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9654,7 +12387,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9664,6 +12397,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9683,7 +12419,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9703,7 +12439,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9712,6 +12448,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9734,7 +12473,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9754,7 +12493,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9774,7 +12513,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9783,6 +12522,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9804,7 +12546,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9813,6 +12555,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9834,7 +12579,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9843,6 +12588,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9864,7 +12612,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9873,6 +12621,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9894,7 +12645,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9914,7 +12665,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9935,7 +12686,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9956,7 +12707,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9965,6 +12716,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9987,7 +12741,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9996,6 +12750,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10018,7 +12775,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10028,6 +12785,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10047,7 +12807,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10057,6 +12817,9 @@ func (m 
*PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10076,24 +12839,63 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10105,7 +12907,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10114,19 +12916,20 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10138,7 +12941,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10147,11 +12950,16 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10164,6 +12972,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10191,7 +13002,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10219,7 +13030,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10228,6 +13039,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10249,7 +13063,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10258,6 +13072,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10279,7 +13096,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10288,6 +13105,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10304,6 +13124,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10331,7 +13154,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10359,7 +13182,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10369,6 +13192,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10388,7 +13214,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10398,6 +13224,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10417,7 +13246,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10426,6 +13255,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10447,7 +13279,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10457,6 +13289,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10476,7 +13311,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10486,6 +13321,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10500,6 +13338,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10527,7 +13368,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10555,7 +13396,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10564,6 +13405,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10585,7 +13429,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10594,6 +13438,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10611,6 +13458,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10638,7 +13488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10666,7 +13516,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10686,7 +13536,7 @@ func (m 
*ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10695,11 +13545,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -10719,7 +13572,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10728,6 +13581,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10749,7 +13605,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10763,6 +13619,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10790,7 +13649,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10818,7 +13677,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10837,7 +13696,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10856,7 +13715,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -10875,7 +13734,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10894,7 +13753,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10913,7 +13772,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10922,6 +13781,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10939,6 +13801,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10966,7 +13831,7 @@ func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10989,6 +13854,9 @@ func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11016,7 +13884,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11044,7 +13912,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -11058,6 +13926,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11085,7 +13956,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11113,7 +13984,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11122,11 +13993,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11141,6 +14015,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11168,7 +14045,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11196,7 +14073,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11205,11 +14082,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11229,7 +14109,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11238,11 +14118,14 @@ func (m *RollingUpdateDeployment) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11257,6 +14140,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11284,7 +14170,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11312,7 +14198,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11322,6 +14208,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11341,7 +14230,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11350,6 +14239,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11367,6 +14259,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11394,7 +14289,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11422,7 +14317,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11432,6 +14327,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11451,7 +14349,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11460,6 +14358,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11477,6 +14378,127 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11504,7 +14526,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11532,7 +14554,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11542,6 +14564,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11561,7 +14586,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11570,11 +14595,14 @@ func (m 
*SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11589,6 +14617,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11616,7 +14647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11644,7 +14675,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11653,6 +14684,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11674,7 +14708,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11683,6 +14717,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11704,7 +14741,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11713,6 +14750,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11729,6 +14769,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11756,7 +14799,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11784,7 +14827,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11798,6 +14841,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11825,7 +14871,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11853,7 +14899,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11872,7 +14918,7 @@ func (m *ScaleStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11881,6 +14927,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11901,7 +14950,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11918,7 +14967,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11928,6 +14977,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -11944,7 +14996,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11954,6 +15006,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -11990,7 +15045,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12000,6 +15055,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12014,6 +15072,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12041,7 +15102,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12069,7 +15130,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12079,6 +15140,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12098,7 +15162,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -12107,6 +15171,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-12124,6 +15191,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12190,10 +15260,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -12222,6 +15295,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -12240,238 +15316,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 3622 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, - 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x48, 0x91, 0x63, 0xc9, 0xe2, 0xc8, 0x6d, - 0x40, 0x91, 0x1d, 0x69, 0xc6, 0x92, 0x25, 0x59, 0xb1, 0x10, 0xdb, 0x1c, 0x52, 0x94, 0xe8, 0xf0, - 0x63, 0x5c, 0x43, 0x2a, 0x86, 0x11, 0x3b, 0x6e, 0xce, 0x14, 0x87, 0x2d, 0xf6, 0x74, 0xb7, 0xbb, - 0x6b, 0x68, 0x0e, 0x90, 0x43, 0x0e, 0x41, 0x80, 0x00, 0x09, 0x92, 0x8b, 0x93, 0x1c, 0x63, 0x04, - 0xc8, 0x29, 0x41, 0x72, 0xcb, 0x1e, 0x0c, 0x03, 0x0b, 0x78, 0x01, 0x61, 0xe1, 0x05, 0x7c, 0x5b, - 0x9f, 0x88, 0x35, 0x7d, 0x5a, 0xec, 0x3f, 0xb0, 0xd0, 0x61, 0x77, 0x51, 0xd5, 0xd5, 0xdf, 0xdd, - 0x9a, 0x26, 0x2d, 0x11, 0x8b, 0xc5, 0xde, 0x38, 0xf5, 0xde, 0xfb, 0xbd, 0x57, 0x55, 0xaf, 0xde, - 0x7b, 0x5d, 0xf5, 0x08, 0xcb, 0x7b, 0x77, 0xec, 0xaa, 0x6a, 0xd4, 0xf6, 0x7a, 0xdb, 0xc4, 0xd2, - 0x09, 0x25, 0x76, 0x6d, 0x9f, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x40, - 0x89, 0x6e, 0xab, 0x86, 0x6e, 0xd7, 0xf6, 0xaf, 0x6f, 0x13, 0xaa, 0x5c, 0xaf, 0x75, 0x88, 0x4e, - 0x2c, 0x85, 0x92, 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x8b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, - 0xcf, 0x5e, 0x15, 0xec, 0xe7, 0xaf, 0x75, 0x54, 0xba, 0xdb, 0xdb, 0xae, 0xb6, 0x8c, 0x6e, 0xad, - 0x63, 0x74, 0x8c, 0x1a, 0x97, 0xda, 0xee, 0xed, 0xf0, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0x3b, - 0x2f, 0x07, 0x94, 0xb7, 0x0c, 0x8b, 0xd4, 0xf6, 0x63, 0x1a, 0xcf, 0xdf, 0xf4, 0x79, 0xba, 0x4a, - 0x6b, 0x57, 0xd5, 0x89, 0xd5, 0xaf, 0x99, 0x7b, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, - 0x52, 0xb5, 0x34, 0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xb7, 0x07, 0x09, 0xd8, 0xad, - 0x5d, 0xd2, 0x55, 0x62, 0x72, 0xaf, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, - 0x8a, 0x0a, 0xc9, 0x37, 0x61, 0x72, 0x41, 0xd3, 0x8c, 0x4f, 0x49, 0x7b, 0xb1, 0xb9, 0xb2, 0x64, - 0xa9, 0xfb, 0xc4, 0x42, 0x97, 0xa0, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0x2e, 0x49, 0x57, 0x46, 0xea, - 0x67, 0x1f, 0x1f, 0x56, 0xce, 0x1c, 0x1d, 0x56, 0x0a, 0xeb, 0x4a, 0x97, 0x60, 0x4e, 0x91, 0xef, - 0xc2, 0x94, 0x90, 0x5a, 0xd6, 0xc8, 0xc1, 0x43, 0x43, 0xeb, 0x75, 0x09, 0xba, 0x0c, 0xc3, 0x6d, - 0x0e, 0x20, 0x04, 0xc7, 0x85, 0xe0, 0xb0, 0x03, 0x8b, 0x05, 0x55, 0xb6, 0x61, 0x42, 0x08, 0x3f, - 0x30, 0x6c, 0xda, 0x50, 0xe8, 0x2e, 0xba, 0x01, 0x60, 0x2a, 0x74, 0xb7, 0x61, 0x91, 0x1d, 0xf5, - 0x40, 0x88, 0x23, 0x21, 0x0e, 0x0d, 0x8f, 
0x82, 0x03, 0x5c, 0xe8, 0x2a, 0x94, 0x2c, 0xa2, 0xb4, - 0x37, 0x74, 0xad, 0x5f, 0xce, 0x5d, 0x92, 0xae, 0x94, 0xea, 0x93, 0x42, 0xa2, 0x84, 0xc5, 0x38, - 0xf6, 0x38, 0xe4, 0xcf, 0x72, 0x30, 0xb2, 0xa4, 0x90, 0xae, 0xa1, 0x37, 0x09, 0x45, 0x1f, 0x43, - 0x89, 0x6d, 0x57, 0x5b, 0xa1, 0x0a, 0xd7, 0x36, 0x7a, 0xe3, 0xb5, 0xaa, 0xef, 0x4e, 0xde, 0xea, - 0x55, 0xcd, 0xbd, 0x0e, 0x1b, 0xb0, 0xab, 0x8c, 0xbb, 0xba, 0x7f, 0xbd, 0xba, 0xb1, 0xfd, 0x88, - 0xb4, 0xe8, 0x1a, 0xa1, 0x8a, 0x6f, 0x9f, 0x3f, 0x86, 0x3d, 0x54, 0xb4, 0x0e, 0x05, 0xdb, 0x24, - 0x2d, 0x6e, 0xd9, 0xe8, 0x8d, 0xab, 0xd5, 0xa7, 0x3a, 0x6b, 0xd5, 0xb3, 0xac, 0x69, 0x92, 0x96, - 0xbf, 0xe2, 0xec, 0x17, 0xe6, 0x38, 0xe8, 0x21, 0x0c, 0xdb, 0x54, 0xa1, 0x3d, 0xbb, 0x9c, 0xe7, - 0x88, 0xd5, 0xcc, 0x88, 0x5c, 0xca, 0xdf, 0x0c, 0xe7, 0x37, 0x16, 0x68, 0xf2, 0x2f, 0x73, 0x80, - 0x3c, 0xde, 0x45, 0x43, 0x6f, 0xab, 0x54, 0x35, 0x74, 0xf4, 0x26, 0x14, 0x68, 0xdf, 0x74, 0x5d, - 0xe0, 0xb2, 0x6b, 0xd0, 0x66, 0xdf, 0x24, 0x4f, 0x0e, 0x2b, 0xb3, 0x71, 0x09, 0x46, 0xc1, 0x5c, - 0x06, 0xad, 0x7a, 0xa6, 0xe6, 0xb8, 0xf4, 0xcd, 0xb0, 0xea, 0x27, 0x87, 0x95, 0x84, 0xc3, 0x56, - 0xf5, 0x90, 0xc2, 0x06, 0xa2, 0x7d, 0x40, 0x9a, 0x62, 0xd3, 0x4d, 0x4b, 0xd1, 0x6d, 0x47, 0x93, - 0xda, 0x25, 0x62, 0x11, 0x5e, 0xcd, 0xb6, 0x69, 0x4c, 0xa2, 0x7e, 0x5e, 0x58, 0x81, 0x56, 0x63, - 0x68, 0x38, 0x41, 0x03, 0xf3, 0x66, 0x8b, 0x28, 0xb6, 0xa1, 0x97, 0x0b, 0x61, 0x6f, 0xc6, 0x7c, - 0x14, 0x0b, 0x2a, 0x7a, 0x05, 0x8a, 0x5d, 0x62, 0xdb, 0x4a, 0x87, 0x94, 0x87, 0x38, 0xe3, 0x84, - 0x60, 0x2c, 0xae, 0x39, 0xc3, 0xd8, 0xa5, 0xcb, 0x5f, 0x48, 0x30, 0xe6, 0xad, 0xdc, 0xaa, 0x6a, - 0x53, 0xf4, 0x57, 0x31, 0x3f, 0xac, 0x66, 0x9b, 0x12, 0x93, 0xe6, 0x5e, 0xe8, 0xf9, 0xbc, 0x3b, - 0x12, 0xf0, 0xc1, 0x35, 0x18, 0x52, 0x29, 0xe9, 0xb2, 0x7d, 0xc8, 0x5f, 0x19, 0xbd, 0x71, 0x25, - 0xab, 0xcb, 0xd4, 0xc7, 0x04, 0xe8, 0xd0, 0x0a, 0x13, 0xc7, 0x0e, 0x8a, 0xfc, 0xaf, 0x85, 0x80, - 0xf9, 0xcc, 0x35, 0xd1, 0x87, 0x50, 0xb2, 0x89, 0x46, 0x5a, 0xd4, 0xb0, 0x84, 0xf9, 0xaf, 0x67, - 0x34, 0x5f, 0xd9, 0x26, 0x5a, 0x53, 0x88, 0xd6, 0xcf, 0x32, 0xfb, 0xdd, 0x5f, 0xd8, 0x83, 0x44, - 0xef, 0x41, 0x89, 0x92, 0xae, 0xa9, 0x29, 0x94, 0x88, 0x73, 0xf4, 0x72, 0x70, 0x0a, 0xcc, 0x73, - 0x18, 0x58, 0xc3, 0x68, 0x6f, 0x0a, 0x36, 0x7e, 0x7c, 0xbc, 0x25, 0x71, 0x47, 0xb1, 0x07, 0x83, - 0xf6, 0x61, 0xbc, 0x67, 0xb6, 0x19, 0x27, 0x65, 0x51, 0xb0, 0xd3, 0x17, 0x9e, 0x74, 0x3b, 0xeb, - 0xda, 0x6c, 0x85, 0xa4, 0xeb, 0xb3, 0x42, 0xd7, 0x78, 0x78, 0x1c, 0x47, 0xb4, 0xa0, 0x05, 0x98, - 0xe8, 0xaa, 0x3a, 0x8b, 0x4b, 0xfd, 0x26, 0x69, 0x19, 0x7a, 0xdb, 0xe6, 0x6e, 0x35, 0x54, 0x9f, - 0x13, 0x00, 0x13, 0x6b, 0x61, 0x32, 0x8e, 0xf2, 0xa3, 0x77, 0x01, 0xb9, 0xd3, 0xb8, 0xef, 0x04, - 0x71, 0xd5, 0xd0, 0xb9, 0xcf, 0xe5, 0x7d, 0xe7, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0xb4, 0x0a, - 0x33, 0x16, 0xd9, 0x57, 0xd9, 0x1c, 0x1f, 0xa8, 0x36, 0x35, 0xac, 0xfe, 0xaa, 0xda, 0x55, 0x69, - 0x79, 0x98, 0xdb, 0x54, 0x3e, 0x3a, 0xac, 0xcc, 0xe0, 0x04, 0x3a, 0x4e, 0x94, 0x92, 0xff, 0x6d, - 0x18, 0x26, 0x22, 0xf1, 0x06, 0x3d, 0x84, 0xd9, 0x56, 0xcf, 0xb2, 0x88, 0x4e, 0xd7, 0x7b, 0xdd, - 0x6d, 0x62, 0x35, 0x5b, 0xbb, 0xa4, 0xdd, 0xd3, 0x48, 0x9b, 0x3b, 0xca, 0x50, 0x7d, 0x5e, 0x58, - 0x3c, 0xbb, 0x98, 0xc8, 0x85, 0x53, 0xa4, 0xd9, 0x2a, 0xe8, 0x7c, 0x68, 0x4d, 0xb5, 0x6d, 0x0f, - 0x33, 0xc7, 0x31, 0xbd, 0x55, 0x58, 0x8f, 0x71, 0xe0, 0x04, 0x29, 0x66, 0x63, 0x9b, 0xd8, 0xaa, - 0x45, 0xda, 0x51, 0x1b, 0xf3, 0x61, 0x1b, 0x97, 0x12, 0xb9, 0x70, 0x8a, 0x34, 0xba, 0x05, 0xa3, - 0x8e, 0x36, 0xbe, 0x7f, 0x62, 0xa3, 0xa7, 0x05, 0xd8, 0xe8, 0xba, 
0x4f, 0xc2, 0x41, 0x3e, 0x36, - 0x35, 0x63, 0xdb, 0x26, 0xd6, 0x3e, 0x69, 0xa7, 0x6f, 0xf0, 0x46, 0x8c, 0x03, 0x27, 0x48, 0xb1, - 0xa9, 0x39, 0x1e, 0x18, 0x9b, 0xda, 0x70, 0x78, 0x6a, 0x5b, 0x89, 0x5c, 0x38, 0x45, 0x9a, 0xf9, - 0xb1, 0x63, 0xf2, 0xc2, 0xbe, 0xa2, 0x6a, 0xca, 0xb6, 0x46, 0xca, 0xc5, 0xb0, 0x1f, 0xaf, 0x87, - 0xc9, 0x38, 0xca, 0x8f, 0xee, 0xc3, 0x94, 0x33, 0xb4, 0xa5, 0x2b, 0x1e, 0x48, 0x89, 0x83, 0xbc, - 0x20, 0x40, 0xa6, 0xd6, 0xa3, 0x0c, 0x38, 0x2e, 0x83, 0xde, 0x84, 0xf1, 0x96, 0xa1, 0x69, 0xdc, - 0x1f, 0x17, 0x8d, 0x9e, 0x4e, 0xcb, 0x23, 0x1c, 0x05, 0xb1, 0xf3, 0xb8, 0x18, 0xa2, 0xe0, 0x08, - 0x27, 0x22, 0x00, 0x2d, 0x37, 0xe1, 0xd8, 0x65, 0xe0, 0xf1, 0xf1, 0x7a, 0xd6, 0x18, 0xe0, 0xa5, - 0x2a, 0xbf, 0x06, 0xf0, 0x86, 0x6c, 0x1c, 0x00, 0x96, 0x7f, 0x2a, 0xc1, 0x5c, 0x4a, 0xe8, 0x40, - 0x6f, 0x87, 0x52, 0xec, 0x9f, 0x46, 0x52, 0xec, 0x85, 0x14, 0xb1, 0x40, 0x9e, 0xd5, 0x61, 0xcc, - 0x62, 0xb3, 0xd2, 0x3b, 0x0e, 0x8b, 0x88, 0x91, 0xb7, 0x06, 0x4c, 0x03, 0x07, 0x65, 0xfc, 0x98, - 0x3f, 0x75, 0x74, 0x58, 0x19, 0x0b, 0xd1, 0x70, 0x18, 0x5e, 0xfe, 0xf7, 0x1c, 0xc0, 0x12, 0x31, - 0x35, 0xa3, 0xdf, 0x25, 0xfa, 0x69, 0xd4, 0x50, 0x1b, 0xa1, 0x1a, 0xea, 0xda, 0xa0, 0xed, 0xf1, - 0x4c, 0x4b, 0x2d, 0xa2, 0xfe, 0x32, 0x52, 0x44, 0xd5, 0xb2, 0x43, 0x3e, 0xbd, 0x8a, 0xfa, 0x79, - 0x1e, 0xa6, 0x7d, 0x66, 0xbf, 0x8c, 0xba, 0x1b, 0xda, 0xe3, 0x3f, 0x89, 0xec, 0xf1, 0x5c, 0x82, - 0xc8, 0x73, 0xab, 0xa3, 0x9e, 0x7d, 0x3d, 0x83, 0x1e, 0xc1, 0x38, 0x2b, 0x9c, 0x1c, 0xf7, 0xe0, - 0x65, 0xd9, 0xf0, 0xb1, 0xcb, 0x32, 0x2f, 0x81, 0xae, 0x86, 0x90, 0x70, 0x04, 0x39, 0xa5, 0x0c, - 0x2c, 0x3e, 0xef, 0x32, 0x50, 0xfe, 0x52, 0x82, 0x71, 0x7f, 0x9b, 0x4e, 0xa1, 0x68, 0x5b, 0x0f, - 0x17, 0x6d, 0xaf, 0x64, 0x76, 0xd1, 0x94, 0xaa, 0xed, 0xd7, 0xac, 0xc0, 0xf7, 0x98, 0xd8, 0x01, - 0xdf, 0x56, 0x5a, 0x7b, 0x83, 0xbf, 0xf1, 0xd0, 0x67, 0x12, 0x20, 0x91, 0x05, 0x16, 0x74, 0xdd, - 0xa0, 0x8a, 0x13, 0x2b, 0x1d, 0xb3, 0x56, 0x32, 0x9b, 0xe5, 0x6a, 0xac, 0x6e, 0xc5, 0xb0, 0xee, - 0xe9, 0xd4, 0xea, 0xfb, 0x3b, 0x12, 0x67, 0xc0, 0x09, 0x06, 0x20, 0x05, 0xc0, 0x12, 0x98, 0x9b, - 0x86, 0x38, 0xc8, 0xd7, 0x32, 0xc4, 0x3c, 0x26, 0xb0, 0x68, 0xe8, 0x3b, 0x6a, 0xc7, 0x0f, 0x3b, - 0xd8, 0x03, 0xc2, 0x01, 0xd0, 0xf3, 0xf7, 0x60, 0x2e, 0xc5, 0x5a, 0x34, 0x09, 0xf9, 0x3d, 0xd2, - 0x77, 0x96, 0x0d, 0xb3, 0x3f, 0xd1, 0x0c, 0x0c, 0xed, 0x2b, 0x5a, 0xcf, 0x09, 0xbf, 0x23, 0xd8, - 0xf9, 0xf1, 0x66, 0xee, 0x8e, 0x24, 0x7f, 0x31, 0x14, 0xf4, 0x1d, 0x5e, 0x31, 0x5f, 0x61, 0x1f, - 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, 0x67, 0x9d, 0x0f, 0x56, 0x67, 0x0c, 0x7b, 0xd4, - 0x50, 0x6d, 0x9d, 0x7b, 0xbe, 0xb5, 0x75, 0xfe, 0xd9, 0xd4, 0xd6, 0x7f, 0x0d, 0x25, 0xdb, 0xad, - 0xaa, 0x0b, 0x1c, 0xf2, 0xfa, 0x31, 0xe2, 0xab, 0x28, 0xa8, 0x3d, 0x05, 0x5e, 0x29, 0xed, 0x81, - 0x26, 0x15, 0xd1, 0x43, 0xc7, 0x2c, 0xa2, 0x9f, 0x69, 0xe1, 0xcb, 0x62, 0xaa, 0xa9, 0xf4, 0x6c, - 0xd2, 0xe6, 0x81, 0xa8, 0xe4, 0xc7, 0xd4, 0x06, 0x1f, 0xc5, 0x82, 0x8a, 0x3e, 0x0c, 0xb9, 0x6c, - 0xe9, 0x24, 0x2e, 0x3b, 0x9e, 0xee, 0xae, 0x68, 0x0b, 0xe6, 0x4c, 0xcb, 0xe8, 0x58, 0xc4, 0xb6, - 0x97, 0x88, 0xd2, 0xd6, 0x54, 0x9d, 0xb8, 0xeb, 0xe3, 0x54, 0x44, 0x17, 0x8e, 0x0e, 0x2b, 0x73, - 0x8d, 0x64, 0x16, 0x9c, 0x26, 0x2b, 0x3f, 0x2e, 0xc0, 0x64, 0x34, 0x03, 0xa6, 0x14, 0xa9, 0xd2, - 0x89, 0x8a, 0xd4, 0xab, 0x81, 0xc3, 0xe0, 0x54, 0xf0, 0x81, 0x1b, 0x9c, 0xd8, 0x81, 0x58, 0x80, - 0x09, 0x11, 0x0d, 0x5c, 0xa2, 0x28, 0xd3, 0xbd, 0xdd, 0xdf, 0x0a, 0x93, 0x71, 0x94, 0x9f, 0x95, - 0x9e, 0x7e, 0x45, 0xe9, 0x82, 0x14, 0xc2, 0xa5, 0xe7, 0x42, 0x94, 0x01, 0xc7, 0x65, 0xd0, 
0x1a, - 0x4c, 0xf7, 0xf4, 0x38, 0x94, 0xe3, 0x8d, 0x17, 0x04, 0xd4, 0xf4, 0x56, 0x9c, 0x05, 0x27, 0xc9, - 0xa1, 0x9d, 0x50, 0x35, 0x3a, 0xcc, 0x23, 0xec, 0x8d, 0xcc, 0x67, 0x27, 0x73, 0x39, 0x8a, 0xee, - 0xc2, 0x98, 0xc5, 0xbf, 0x3b, 0x5c, 0x83, 0x9d, 0xda, 0xfd, 0x9c, 0x10, 0x1b, 0xc3, 0x41, 0x22, - 0x0e, 0xf3, 0x26, 0x94, 0xdb, 0xa5, 0xac, 0xe5, 0xb6, 0xfc, 0x63, 0x29, 0x98, 0x84, 0xbc, 0x12, - 0x78, 0xd0, 0x2d, 0x53, 0x4c, 0x22, 0x50, 0x1d, 0x19, 0xc9, 0xd5, 0xef, 0xed, 0x63, 0x55, 0xbf, - 0x7e, 0xf2, 0x1c, 0x5c, 0xfe, 0x7e, 0x2e, 0xc1, 0xec, 0x72, 0xf3, 0xbe, 0x65, 0xf4, 0x4c, 0xd7, - 0x9c, 0x0d, 0xd3, 0x59, 0xd7, 0x37, 0xa0, 0x60, 0xf5, 0x34, 0x77, 0x1e, 0x2f, 0xbb, 0xf3, 0xc0, - 0x3d, 0x8d, 0xcd, 0x63, 0x3a, 0x22, 0xe5, 0x4c, 0x82, 0x09, 0xa0, 0x75, 0x18, 0xb6, 0x14, 0xbd, - 0x43, 0xdc, 0xb4, 0x7a, 0x79, 0x80, 0xf5, 0x2b, 0x4b, 0x98, 0xb1, 0x07, 0x8a, 0x37, 0x2e, 0x8d, - 0x05, 0x8a, 0xfc, 0x4f, 0x12, 0x4c, 0x3c, 0xd8, 0xdc, 0x6c, 0xac, 0xe8, 0xfc, 0x44, 0xf3, 0xbb, - 0xd5, 0x4b, 0x50, 0x30, 0x15, 0xba, 0x1b, 0xcd, 0xf4, 0x8c, 0x86, 0x39, 0x05, 0xbd, 0x0f, 0x45, - 0x16, 0x49, 0x88, 0xde, 0xce, 0x58, 0x6a, 0x0b, 0xf8, 0xba, 0x23, 0xe4, 0x57, 0x88, 0x62, 0x00, - 0xbb, 0x70, 0xf2, 0x1e, 0xcc, 0x04, 0xcc, 0x61, 0xeb, 0xf1, 0x90, 0x65, 0x47, 0xd4, 0x84, 0x21, - 0xa6, 0x99, 0xe5, 0xc0, 0x7c, 0x86, 0xcb, 0xcc, 0xc8, 0x94, 0xfc, 0x4a, 0x87, 0xfd, 0xb2, 0xb1, - 0x83, 0x25, 0xaf, 0xc1, 0x18, 0xbf, 0x50, 0x36, 0x2c, 0xca, 0x97, 0x05, 0x5d, 0x84, 0x7c, 0x57, - 0xd5, 0x45, 0x9e, 0x1d, 0x15, 0x32, 0x79, 0x96, 0x23, 0xd8, 0x38, 0x27, 0x2b, 0x07, 0x22, 0xf2, - 0xf8, 0x64, 0xe5, 0x00, 0xb3, 0x71, 0xf9, 0x3e, 0x14, 0xc5, 0x72, 0x07, 0x81, 0xf2, 0x4f, 0x07, - 0xca, 0x27, 0x00, 0x6d, 0x40, 0x71, 0xa5, 0x51, 0xd7, 0x0c, 0xa7, 0xea, 0x6a, 0xa9, 0x6d, 0x2b, - 0xba, 0x17, 0x8b, 0x2b, 0x4b, 0x18, 0x73, 0x0a, 0x92, 0x61, 0x98, 0x1c, 0xb4, 0x88, 0x49, 0xb9, - 0x47, 0x8c, 0xd4, 0x81, 0xed, 0xf2, 0x3d, 0x3e, 0x82, 0x05, 0x45, 0xfe, 0xe7, 0x1c, 0x14, 0xc5, - 0x72, 0x9c, 0xc2, 0x57, 0xd8, 0x6a, 0xe8, 0x2b, 0xec, 0xd5, 0x6c, 0xae, 0x91, 0xfa, 0x09, 0xb6, - 0x19, 0xf9, 0x04, 0xbb, 0x9a, 0x11, 0xef, 0xe9, 0xdf, 0x5f, 0xff, 0x27, 0xc1, 0x78, 0xd8, 0x29, - 0xd1, 0x2d, 0x18, 0x65, 0x09, 0x47, 0x6d, 0x91, 0x75, 0xbf, 0xce, 0xf5, 0x2e, 0x61, 0x9a, 0x3e, - 0x09, 0x07, 0xf9, 0x50, 0xc7, 0x13, 0x63, 0x7e, 0x24, 0x26, 0x9d, 0xbe, 0xa4, 0x3d, 0xaa, 0x6a, - 0x55, 0xe7, 0x69, 0xa5, 0xba, 0xa2, 0xd3, 0x0d, 0xab, 0x49, 0x2d, 0x55, 0xef, 0xc4, 0x14, 0x71, - 0xa7, 0x0c, 0x22, 0xcb, 0x3f, 0x92, 0x60, 0x54, 0x98, 0x7c, 0x0a, 0x5f, 0x15, 0x7f, 0x11, 0xfe, - 0xaa, 0xb8, 0x9c, 0xf1, 0x80, 0x27, 0x7f, 0x52, 0xfc, 0x97, 0x6f, 0x3a, 0x3b, 0xd2, 0xcc, 0xab, - 0x77, 0x0d, 0x9b, 0x46, 0xbd, 0x9a, 0x1d, 0x46, 0xcc, 0x29, 0xa8, 0x07, 0x93, 0x6a, 0x24, 0x06, - 0x88, 0xa5, 0xad, 0x65, 0xb3, 0xc4, 0x13, 0xab, 0x97, 0x05, 0xfc, 0x64, 0x94, 0x82, 0x63, 0x2a, - 0x64, 0x02, 0x31, 0x2e, 0xf4, 0x1e, 0x14, 0x76, 0x29, 0x35, 0x13, 0xee, 0xab, 0x07, 0x44, 0x1e, - 0xdf, 0x84, 0x12, 0x9f, 0xdd, 0xe6, 0x66, 0x03, 0x73, 0x28, 0xf9, 0x37, 0xfe, 0x7a, 0x34, 0x1d, - 0x1f, 0xf7, 0xe2, 0xa9, 0x74, 0x92, 0x78, 0x3a, 0x9a, 0x14, 0x4b, 0xd1, 0x03, 0xc8, 0x53, 0x2d, - 0xeb, 0x67, 0xa1, 0x40, 0xdc, 0x5c, 0x6d, 0xfa, 0x01, 0x69, 0x73, 0xb5, 0x89, 0x19, 0x04, 0xda, - 0x80, 0x21, 0x96, 0x7d, 0xd8, 0x11, 0xcc, 0x67, 0x3f, 0xd2, 0x6c, 0xfe, 0xbe, 0x43, 0xb0, 0x5f, - 0x36, 0x76, 0x70, 0xe4, 0x4f, 0x60, 0x2c, 0x74, 0x4e, 0xd1, 0xc7, 0x70, 0x56, 0x33, 0x94, 0x76, - 0x5d, 0xd1, 0x14, 0xbd, 0x45, 0xdc, 0xc7, 0x81, 0xcb, 0x49, 0x5f, 0x18, 0xab, 0x01, 0x3e, 0x71, - 0xca, 0x67, 0x84, 
0x92, 0xb3, 0x41, 0x1a, 0x0e, 0x21, 0xca, 0x0a, 0x80, 0x3f, 0x47, 0x54, 0x81, - 0x21, 0xe6, 0x67, 0x4e, 0x3e, 0x19, 0xa9, 0x8f, 0x30, 0x0b, 0x99, 0xfb, 0xd9, 0xd8, 0x19, 0x47, - 0x37, 0x00, 0x6c, 0xd2, 0xb2, 0x08, 0xe5, 0xc1, 0x20, 0x17, 0x7e, 0x60, 0x6c, 0x7a, 0x14, 0x1c, - 0xe0, 0x92, 0x7f, 0x22, 0xc1, 0xd8, 0x3a, 0xa1, 0x9f, 0x1a, 0xd6, 0x5e, 0xc3, 0xd0, 0xd4, 0x56, - 0xff, 0x14, 0x82, 0x2d, 0x0e, 0x05, 0xdb, 0xd7, 0x06, 0xec, 0x4c, 0xc8, 0xba, 0xb4, 0x90, 0x2b, - 0x7f, 0x29, 0xc1, 0x5c, 0x88, 0xf3, 0x9e, 0x7f, 0x74, 0xb7, 0x60, 0xc8, 0x34, 0x2c, 0xea, 0x26, - 0xe2, 0x63, 0x29, 0x64, 0x61, 0x2c, 0x90, 0x8a, 0x19, 0x0c, 0x76, 0xd0, 0xd0, 0x2a, 0xe4, 0xa8, - 0x21, 0x5c, 0xf5, 0x78, 0x98, 0x84, 0x58, 0x75, 0x10, 0x98, 0xb9, 0x4d, 0x03, 0xe7, 0xa8, 0xc1, - 0x36, 0xa2, 0x1c, 0xe2, 0x0a, 0x06, 0x9f, 0xe7, 0x34, 0x03, 0x0c, 0x85, 0x1d, 0xcb, 0xe8, 0x9e, - 0x78, 0x0e, 0xde, 0x46, 0x2c, 0x5b, 0x46, 0x17, 0x73, 0x2c, 0xf9, 0x2b, 0x09, 0xa6, 0x42, 0x9c, - 0xa7, 0x10, 0xf8, 0xdf, 0x0b, 0x07, 0xfe, 0xab, 0xc7, 0x99, 0x48, 0x4a, 0xf8, 0xff, 0x2a, 0x17, - 0x99, 0x06, 0x9b, 0x30, 0xda, 0x81, 0x51, 0xd3, 0x68, 0x37, 0x9f, 0xc1, 0x73, 0xe0, 0x04, 0xcb, - 0x9b, 0x0d, 0x1f, 0x0b, 0x07, 0x81, 0xd1, 0x01, 0x4c, 0xe9, 0x4a, 0x97, 0xd8, 0xa6, 0xd2, 0x22, - 0xcd, 0x67, 0x70, 0x41, 0x72, 0x8e, 0xbf, 0x37, 0x44, 0x11, 0x71, 0x5c, 0x09, 0x5a, 0x83, 0xa2, - 0x6a, 0xf2, 0x3a, 0x4e, 0xd4, 0x2e, 0x03, 0xb3, 0xa8, 0x53, 0xf5, 0x39, 0xf1, 0x5c, 0xfc, 0xc0, - 0x2e, 0x86, 0xfc, 0xdf, 0x51, 0x6f, 0x60, 0xfe, 0x87, 0xee, 0x43, 0x89, 0x37, 0x66, 0xb4, 0x0c, - 0xcd, 0x7d, 0x19, 0x60, 0x3b, 0xdb, 0x10, 0x63, 0x4f, 0x0e, 0x2b, 0x17, 0x12, 0x2e, 0x7d, 0x5d, - 0x32, 0xf6, 0x84, 0xd1, 0x3a, 0x14, 0xcc, 0x1f, 0x52, 0xc1, 0xf0, 0x24, 0xc7, 0xcb, 0x16, 0x8e, - 0x23, 0xff, 0x5d, 0x3e, 0x62, 0x2e, 0x4f, 0x75, 0x8f, 0x9e, 0xd9, 0xae, 0x7b, 0x15, 0x53, 0xea, - 0xce, 0x6f, 0x43, 0x51, 0x64, 0x78, 0xe1, 0xcc, 0x6f, 0x1c, 0xc7, 0x99, 0x83, 0x59, 0xcc, 0xfb, - 0x60, 0x71, 0x07, 0x5d, 0x60, 0xf4, 0x11, 0x0c, 0x13, 0x47, 0x85, 0x93, 0x1b, 0x6f, 0x1f, 0x47, - 0x85, 0x1f, 0x57, 0xfd, 0x42, 0x55, 0x8c, 0x09, 0x54, 0xf4, 0x36, 0x5b, 0x2f, 0xc6, 0xcb, 0x3e, - 0x02, 0xed, 0x72, 0x81, 0xa7, 0xab, 0x8b, 0xce, 0xb4, 0xbd, 0xe1, 0x27, 0x87, 0x15, 0xf0, 0x7f, - 0xe2, 0xa0, 0x84, 0xfc, 0x33, 0x09, 0xa6, 0xf8, 0x0a, 0xb5, 0x7a, 0x96, 0x4a, 0xfb, 0xa7, 0x96, - 0x98, 0x1e, 0x86, 0x12, 0xd3, 0xcd, 0x01, 0xcb, 0x12, 0xb3, 0x30, 0x35, 0x39, 0x7d, 0x2d, 0xc1, - 0xb9, 0x18, 0xf7, 0x29, 0xc4, 0xc5, 0xad, 0x70, 0x5c, 0x7c, 0xed, 0xb8, 0x13, 0x4a, 0x89, 0x8d, - 0xbf, 0x9d, 0x4c, 0x98, 0x0e, 0x3f, 0x29, 0x37, 0x00, 0x4c, 0x4b, 0xdd, 0x57, 0x35, 0xd2, 0x11, - 0x8f, 0xe0, 0xa5, 0x40, 0x8b, 0x93, 0x47, 0xc1, 0x01, 0x2e, 0x64, 0xc3, 0x6c, 0x9b, 0xec, 0x28, - 0x3d, 0x8d, 0x2e, 0xb4, 0xdb, 0x8b, 0x8a, 0xa9, 0x6c, 0xab, 0x9a, 0x4a, 0x55, 0x71, 0x5d, 0x30, - 0x52, 0xbf, 0xeb, 0x3c, 0x4e, 0x27, 0x71, 0x3c, 0x39, 0xac, 0x5c, 0x4c, 0x7a, 0x1d, 0x72, 0x59, - 0xfa, 0x38, 0x05, 0x1a, 0xf5, 0xa1, 0x6c, 0x91, 0x4f, 0x7a, 0xaa, 0x45, 0xda, 0x4b, 0x96, 0x61, - 0x86, 0xd4, 0xe6, 0xb9, 0xda, 0x3f, 0x3f, 0x3a, 0xac, 0x94, 0x71, 0x0a, 0xcf, 0x60, 0xc5, 0xa9, - 0xf0, 0xe8, 0x11, 0x4c, 0x2b, 0xa2, 0x19, 0x2d, 0xa8, 0xd5, 0x39, 0x25, 0x77, 0x8e, 0x0e, 0x2b, - 0xd3, 0x0b, 0x71, 0xf2, 0x60, 0x85, 0x49, 0xa0, 0xa8, 0x06, 0xc5, 0x7d, 0xde, 0xb7, 0x66, 0x97, - 0x87, 0x38, 0x3e, 0x4b, 0x04, 0x45, 0xa7, 0x95, 0x8d, 0x61, 0x0e, 0x2f, 0x37, 0xf9, 0xe9, 0x73, - 0xb9, 0xd8, 0x07, 0x25, 0xab, 0x25, 0xc5, 0x89, 0xe7, 0x37, 0xc6, 0x25, 0x3f, 0x6a, 0x3d, 0xf0, - 0x49, 0x38, 0xc8, 0x87, 0x3e, 0x84, 0x91, 
0x5d, 0x71, 0x2b, 0x61, 0x97, 0x8b, 0x99, 0x92, 0x70, - 0xe8, 0x16, 0xa3, 0x3e, 0x25, 0x54, 0x8c, 0xb8, 0xc3, 0x36, 0xf6, 0x11, 0xd1, 0x2b, 0x50, 0xe4, - 0x3f, 0x56, 0x96, 0xf8, 0x75, 0x5c, 0xc9, 0x8f, 0x6d, 0x0f, 0x9c, 0x61, 0xec, 0xd2, 0x5d, 0xd6, - 0x95, 0xc6, 0x22, 0xbf, 0x16, 0x8e, 0xb0, 0xae, 0x34, 0x16, 0xb1, 0x4b, 0x47, 0x1f, 0x43, 0xd1, - 0x26, 0xab, 0xaa, 0xde, 0x3b, 0x28, 0x43, 0xa6, 0x47, 0xe5, 0xe6, 0x3d, 0xce, 0x1d, 0xb9, 0x18, - 0xf3, 0x35, 0x08, 0x3a, 0x76, 0x61, 0xd1, 0x2e, 0x8c, 0x58, 0x3d, 0x7d, 0xc1, 0xde, 0xb2, 0x89, - 0x55, 0x1e, 0xe5, 0x3a, 0x06, 0x85, 0x73, 0xec, 0xf2, 0x47, 0xb5, 0x78, 0x2b, 0xe4, 0x71, 0x60, - 0x1f, 0x1c, 0xfd, 0xa3, 0x04, 0xc8, 0xee, 0x99, 0xa6, 0x46, 0xba, 0x44, 0xa7, 0x8a, 0xc6, 0xef, - 0xe2, 0xec, 0xf2, 0x59, 0xae, 0xf3, 0x9d, 0x41, 0xf3, 0x8a, 0x09, 0x46, 0x95, 0x7b, 0x97, 0xde, - 0x71, 0x56, 0x9c, 0xa0, 0x97, 0x2d, 0xed, 0x8e, 0xcd, 0xff, 0x2e, 0x8f, 0x65, 0x5a, 0xda, 0xe4, - 0x3b, 0x47, 0x7f, 0x69, 0x05, 0x1d, 0xbb, 0xb0, 0xe8, 0x21, 0xcc, 0xba, 0x6d, 0x8f, 0xd8, 0x30, - 0xe8, 0xb2, 0xaa, 0x11, 0xbb, 0x6f, 0x53, 0xd2, 0x2d, 0x8f, 0xf3, 0x6d, 0xf7, 0x7a, 0x3f, 0x70, - 0x22, 0x17, 0x4e, 0x91, 0x46, 0x5d, 0xa8, 0xb8, 0x21, 0x83, 0x9d, 0x27, 0x2f, 0x66, 0xdd, 0xb3, - 0x5b, 0x8a, 0xe6, 0xbc, 0x03, 0x4c, 0x70, 0x05, 0x2f, 0x1f, 0x1d, 0x56, 0x2a, 0x4b, 0x4f, 0x67, - 0xc5, 0x83, 0xb0, 0xd0, 0xfb, 0x50, 0x56, 0xd2, 0xf4, 0x4c, 0x72, 0x3d, 0x2f, 0xb2, 0x38, 0x94, - 0xaa, 0x20, 0x55, 0x1a, 0x51, 0x98, 0x54, 0xc2, 0x0d, 0xa8, 0x76, 0x79, 0x2a, 0xd3, 0x45, 0x64, - 0xa4, 0x6f, 0xd5, 0xbf, 0x8c, 0x88, 0x10, 0x6c, 0x1c, 0xd3, 0x80, 0xfe, 0x06, 0x90, 0x12, 0xed, - 0x99, 0xb5, 0xcb, 0x28, 0x53, 0xfa, 0x89, 0x35, 0xdb, 0xfa, 0x6e, 0x17, 0x23, 0xd9, 0x38, 0x41, - 0x0f, 0x5a, 0x85, 0x19, 0x31, 0xba, 0xa5, 0xdb, 0xca, 0x0e, 0x69, 0xf6, 0xed, 0x16, 0xd5, 0xec, - 0xf2, 0x34, 0x8f, 0x7d, 0xfc, 0xe1, 0x6b, 0x21, 0x81, 0x8e, 0x13, 0xa5, 0xd0, 0x3b, 0x30, 0xb9, - 0x63, 0x58, 0xdb, 0x6a, 0xbb, 0x4d, 0x74, 0x17, 0x69, 0x86, 0x23, 0xcd, 0xb0, 0xd5, 0x58, 0x8e, - 0xd0, 0x70, 0x8c, 0x1b, 0xd9, 0x70, 0x4e, 0x20, 0x37, 0x2c, 0xa3, 0xb5, 0x66, 0xf4, 0x74, 0xea, - 0x94, 0x44, 0xe7, 0xbc, 0x14, 0x73, 0x6e, 0x21, 0x89, 0xe1, 0xc9, 0x61, 0xe5, 0x52, 0x72, 0x05, - 0xec, 0x33, 0xe1, 0x64, 0x6c, 0xb4, 0x0b, 0xc0, 0xe3, 0x82, 0x73, 0xfc, 0x66, 0xf9, 0xf1, 0xbb, - 0x93, 0x25, 0xea, 0x24, 0x9e, 0x40, 0xe7, 0x49, 0xce, 0x23, 0xe3, 0x00, 0x36, 0xfb, 0x4a, 0x51, - 0x22, 0x6d, 0xd5, 0x76, 0x79, 0x8e, 0xef, 0x75, 0x2d, 0xdb, 0x5e, 0x7b, 0x72, 0x81, 0xa7, 0xa9, - 0x28, 0x22, 0x8e, 0x2b, 0xe1, 0x5d, 0x3a, 0xe2, 0xcd, 0xe6, 0x74, 0x3a, 0x9d, 0x8f, 0xd7, 0xa5, - 0xe3, 0x9b, 0xf6, 0xcc, 0xba, 0x74, 0x02, 0x90, 0x4f, 0xbf, 0x25, 0xfe, 0x55, 0x0e, 0xa6, 0x7d, - 0xe6, 0xcc, 0x5d, 0x3a, 0x09, 0x22, 0x7f, 0xec, 0x76, 0x1e, 0xdc, 0xed, 0xfc, 0xa5, 0x04, 0xe3, - 0xfe, 0xd2, 0xfd, 0xfe, 0x75, 0xce, 0xf8, 0xb6, 0xa5, 0xd4, 0xf2, 0xff, 0x9b, 0x0b, 0x4e, 0xe0, - 0x0f, 0xbe, 0x7d, 0xe3, 0x87, 0xb7, 0x28, 0xcb, 0x5f, 0xe7, 0x61, 0x32, 0x7a, 0x1a, 0x43, 0xaf, - 0xfc, 0xd2, 0xc0, 0x57, 0xfe, 0x06, 0xcc, 0xec, 0xf4, 0x34, 0xad, 0xcf, 0x97, 0x21, 0xf0, 0xd4, - 0xef, 0xbc, 0xd2, 0xbd, 0x28, 0x24, 0x67, 0x96, 0x13, 0x78, 0x70, 0xa2, 0x64, 0x4a, 0xc7, 0x42, - 0xfe, 0x44, 0x1d, 0x0b, 0xb1, 0x07, 0xf4, 0xc2, 0x31, 0x1e, 0xd0, 0x13, 0xbb, 0x0f, 0x86, 0x4e, - 0xd0, 0x7d, 0x70, 0x92, 0x76, 0x81, 0x84, 0x20, 0x36, 0xb0, 0x7b, 0xf5, 0x45, 0x38, 0x2f, 0xc4, - 0x28, 0x7f, 0xc9, 0xd7, 0xa9, 0x65, 0x68, 0x1a, 0xb1, 0x96, 0x7a, 0xdd, 0x6e, 0x5f, 0x7e, 0x0b, - 0xc6, 0xc3, 0x3d, 0x2a, 0xce, 0x4e, 0x3b, 0x6d, 0x32, 0xe2, 0xad, 
0x34, 0xb0, 0xd3, 0xce, 0x38, - 0xf6, 0x38, 0xe4, 0xbf, 0x97, 0x60, 0x36, 0xb9, 0x17, 0x15, 0x69, 0x30, 0xde, 0x55, 0x0e, 0x82, - 0xfd, 0xc1, 0xd2, 0x09, 0x6f, 0xb1, 0x78, 0x73, 0xc2, 0x5a, 0x08, 0x0b, 0x47, 0xb0, 0xe5, 0xef, - 0x25, 0x98, 0x4b, 0x69, 0x0b, 0x38, 0x5d, 0x4b, 0xd0, 0x07, 0x50, 0xea, 0x2a, 0x07, 0xcd, 0x9e, - 0xd5, 0x21, 0x27, 0xbe, 0xb7, 0xe3, 0x11, 0x63, 0x4d, 0xa0, 0x60, 0x0f, 0x4f, 0xfe, 0x4f, 0x09, - 0x5e, 0x48, 0xad, 0x65, 0xd0, 0xed, 0x50, 0x07, 0x83, 0x1c, 0xe9, 0x60, 0x40, 0x71, 0xc1, 0xe7, - 0xd4, 0xc0, 0xf0, 0xb9, 0x04, 0xe5, 0xb4, 0xef, 0x3c, 0x74, 0x2b, 0x64, 0xe4, 0x4b, 0x11, 0x23, - 0xa7, 0x62, 0x72, 0xcf, 0xc9, 0xc6, 0xff, 0x91, 0x60, 0x36, 0xf9, 0x7b, 0x17, 0xbd, 0x1e, 0xb2, - 0xb0, 0x12, 0xb1, 0x70, 0x22, 0x22, 0x25, 0xec, 0xfb, 0x08, 0xc6, 0xc5, 0x57, 0xb1, 0x80, 0x11, - 0x7b, 0x2f, 0x27, 0x45, 0x74, 0x01, 0xe1, 0xd6, 0xa0, 0xdc, 0xab, 0xc2, 0x63, 0x38, 0x82, 0x26, - 0xff, 0x43, 0x0e, 0x86, 0x9a, 0x2d, 0x45, 0x23, 0xa7, 0x50, 0x0c, 0xbe, 0x1b, 0x2a, 0x06, 0x07, - 0xfd, 0xc7, 0x11, 0xb7, 0x2a, 0xb5, 0x0e, 0xc4, 0x91, 0x3a, 0xf0, 0xd5, 0x4c, 0x68, 0x4f, 0x2f, - 0x01, 0xff, 0x0c, 0x46, 0x3c, 0xa5, 0xc7, 0xcb, 0x4c, 0xf2, 0x7f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, - 0x98, 0x79, 0x6d, 0x27, 0x54, 0x0f, 0xe4, 0x33, 0x7c, 0x78, 0x04, 0x74, 0x55, 0xdd, 0x0a, 0xc0, - 0xe9, 0x98, 0xf5, 0x7b, 0x24, 0xe3, 0x85, 0xc1, 0x5b, 0x30, 0x4e, 0x15, 0xab, 0x43, 0xa8, 0x77, - 0x27, 0x9f, 0xe7, 0xbe, 0xe8, 0xf5, 0x59, 0x6f, 0x86, 0xa8, 0x38, 0xc2, 0x7d, 0xfe, 0x2e, 0x8c, - 0x85, 0x94, 0x1d, 0xab, 0xe1, 0xf5, 0xff, 0x25, 0x78, 0x69, 0xe0, 0x8d, 0x09, 0xaa, 0x87, 0x0e, - 0x49, 0x35, 0x72, 0x48, 0xe6, 0xd3, 0x01, 0x9e, 0x5f, 0xe3, 0x54, 0xfd, 0xda, 0xe3, 0xef, 0xe6, - 0xcf, 0x7c, 0xf3, 0xdd, 0xfc, 0x99, 0x6f, 0xbf, 0x9b, 0x3f, 0xf3, 0xb7, 0x47, 0xf3, 0xd2, 0xe3, - 0xa3, 0x79, 0xe9, 0x9b, 0xa3, 0x79, 0xe9, 0xdb, 0xa3, 0x79, 0xe9, 0x17, 0x47, 0xf3, 0xd2, 0xbf, - 0x7c, 0x3f, 0x7f, 0xe6, 0x83, 0xa2, 0x80, 0xfb, 0x5d, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xf0, - 0x40, 0xcd, 0xc4, 0x3c, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 55239b8d..6c90cb3c 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -649,7 +649,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; @@ -872,7 +872,7 @@ message PodSecurityPolicySpec { repeated AllowedFlexVolume allowedFlexVolumes = 18; // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. // +optional repeated AllowedCSIDriver allowedCSIDrivers = 23; @@ -902,6 +902,12 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. 
// +optional repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; } // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for @@ -1104,6 +1110,21 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. // Deprecated: use SELinuxStrategyOptions from policy API Group instead. message SELinuxStrategyOptions { diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index c34ea599..8f581ef1 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -929,7 +929,7 @@ type PodSecurityPolicySpec struct { // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. // +optional AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -956,6 +956,11 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -1171,6 +1176,25 @@ const ( SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. 
+type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. @@ -1186,6 +1210,7 @@ type PodSecurityPolicyList struct { Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. @@ -1271,7 +1296,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 654f34ae..a7eb2ec9 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -379,7 +379,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). 
If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -470,10 +470,11 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -596,6 +597,16 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. 
A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index a67968af..cb610179 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -124,7 +124,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -280,7 +280,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -595,7 +595,7 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -826,7 +826,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) @@ -979,7 +979,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -1085,6 +1085,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } return } @@ -1147,7 +1152,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -1356,6 +1361,32 
@@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 86bd80c8..c9b22fc7 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -17,38 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto - - It has these top-level messages: - IPBlock - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,39 +48,229 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{0} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{1} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{2} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptor_1c72867a70a7cc90, []int{3} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{4} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{5} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{6} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{7} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func init() { proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") @@ -105,10 +282,70 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) +} + +var fileDescriptor_1c72867a70a7cc90 = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, + 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, + 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, + 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, + 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, + 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, + 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, + 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, + 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, + 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, + 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, + 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, + 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, + 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, + 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 
0x34, 0x42, 0x3a, + 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, + 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, + 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, + 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, + 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, + 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, + 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, + 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, + 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, + 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, + 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, + 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, + 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, + 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, + 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, + 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, + 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, + 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, + 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, + 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, + 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, + 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, + 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, + 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, + 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, + 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, + 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, + 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, + 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, + 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, + 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, + 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, + 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, + 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, + 0x67, 0x08, 0x00, 0x00, +} + func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -116,36 +353,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for _, s := range m.Except { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -153,33 +390,42 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -187,41 +433,50 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -229,41 +484,50 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -271,37 +535,46 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyPeer) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -309,47 +582,58 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n4, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n5, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n6, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -357,33 +641,41 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n7, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x12 } - return i, nil + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -391,70 +683,80 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n8, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Egress) > 0 { - for _, msg := range m.Egress { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -469,6 +771,9 @@ func (m *IPBlock) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -479,6 +784,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -497,6 +805,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -515,6 +826,9 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -529,6 +843,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -547,6 +864,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -561,6 +881,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m 
*NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -587,14 +910,7 @@ func (m *NetworkPolicySpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -615,7 +931,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -625,9 +941,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -636,9 +962,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -647,9 +983,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -659,9 +1000,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -672,7 +1013,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -681,10 +1022,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -713,7 +1064,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -741,7 +1092,7 @@ func (m 
*IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -751,6 +1102,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -770,7 +1124,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -780,6 +1134,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -794,6 +1151,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -821,7 +1181,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -849,7 +1209,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -858,6 +1218,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -879,7 +1242,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -888,6 +1251,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -904,6 +1270,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -931,7 +1300,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -959,7 +1328,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -968,6 +1337,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -990,7 +1362,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -999,6 +1371,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1016,6 +1391,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1043,7 +1421,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1071,7 +1449,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1080,6 +1458,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1102,7 +1483,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1111,6 +1492,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1128,6 +1512,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1155,7 +1542,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1183,7 +1570,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1192,6 +1579,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1213,7 +1603,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1222,6 +1612,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1239,6 +1632,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1266,7 +1662,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1294,7 +1690,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1303,11 +1699,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1327,7 +1726,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1336,11 +1735,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1360,7 +1762,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1369,6 +1771,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1388,6 +1793,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1415,7 +1823,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1443,7 +1851,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1453,6 +1861,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1473,7 +1884,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1482,11 +1893,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1501,6 +1915,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1528,7 +1945,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -1556,7 +1973,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1565,6 +1982,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1586,7 +2006,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1595,6 +2015,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1617,7 +2040,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1626,6 +2049,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1648,7 +2074,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1658,6 +2084,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1672,6 +2101,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1738,10 +2170,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1770,6 +2205,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1788,62 +2226,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, - 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, - 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, - 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, - 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, - 
0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, - 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, - 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, - 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, - 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, - 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, - 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, - 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, - 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, - 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, - 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, - 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, - 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, - 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, - 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, - 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, - 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, - 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, - 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, - 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, - 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, - 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, - 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, - 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, - 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, - 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, - 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, - 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, - 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, - 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, - 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, - 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, - 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, - 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, - 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, - 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, - 0x7c, 0xf6, 0xbf, 0x2d, 
0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, - 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, - 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, - 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, - 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, - 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, - 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, - 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, - 0x67, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index cbbb2652..3cb73804 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -92,7 +92,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 59331111..38a640f0 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -107,7 +107,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index cfcd0c54..188b72a1 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -60,7 +60,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index d1e4e884..1833e978 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -139,7 +139,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 14430cba..8ed56009 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -17,34 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto - - It has these top-level messages: - HTTPIngressPath - HTTPIngressRuleValue - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -57,45 +43,285 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{0} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{1} +} +func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{2} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} -func (m *IngressRule) Reset() { 
*m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_Ingress proto.InternalMessageInfo -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{3} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{4} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_IngressList proto.InternalMessageInfo + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{5} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func 
(*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{6} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{7} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{8} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{9} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") @@ -109,10 +335,70 @@ func init() { proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") } + +func init() { + 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) +} + +var fileDescriptor_5bea11de0ceb8f53 = []byte{ + // 812 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, + 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, + 0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, + 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, + 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, + 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, + 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, + 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, + 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, + 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, + 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, + 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, + 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, + 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, + 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, + 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, + 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, + 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, + 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, + 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, + 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, + 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, + 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, + 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, + 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, + 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, + 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, + 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, + 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, + 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, + 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, + 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, + 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, + 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 
0xbe, + 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, + 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, + 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, + 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, + 0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, + 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, + 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, + 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, + 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, + 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, + 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, + 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, + 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, + 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, + 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, + 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, +} + func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -120,29 +406,37 @@ func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n1, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -150,29 +444,36 @@ func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -180,41 +481,52 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { } func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n3, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n4, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -222,29 +534,37 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { } func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n5, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -252,37 +572,46 @@ func (m *IngressList) Marshal() (dAtA 
[]byte, err error) { } func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -290,29 +619,37 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { } func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n7, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -320,27 +657,34 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n8, err := m.HTTP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return 
nil, err } @@ -348,51 +692,62 @@ func (m *IngressSpec) Marshal() (dAtA []byte, err error) { } func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n9, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n9 } if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -400,25 +755,32 @@ func (m *IngressStatus) Marshal() (dAtA []byte, err error) { } func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n10, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -426,42 +788,47 @@ func (m *IngressTLS) Marshal() (dAtA []byte, err error) { } func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 if len(m.Hosts) > 0 { - for _, s := range m.Hosts { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -472,6 +839,9 @@ func (m *HTTPIngressPath) Size() (n int) { } func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -484,6 +854,9 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -496,6 +869,9 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ServiceName) @@ -506,6 +882,9 @@ func (m *IngressBackend) Size() (n int) { } func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -520,6 +899,9 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Host) @@ -530,6 +912,9 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HTTP != nil { @@ -540,6 +925,9 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Backend != nil { @@ -562,6 +950,9 @@ func (m *IngressSpec) Size() (n int) { } func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LoadBalancer.Size() @@ -570,6 +961,9 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Hosts) > 0 { @@ -584,14 +978,7 @@ func (m *IngressTLS) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -611,8 +998,13 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s @@ -622,7 +1014,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -635,7 +1027,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -644,9 +1036,14 @@ func (this *IngressList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -667,7 +1064,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -676,10 +1073,20 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -689,7 +1096,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + 
`,`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -728,7 +1135,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -756,7 +1163,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -766,6 +1173,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -785,7 +1195,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -794,6 +1204,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -810,6 +1223,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -837,7 +1253,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -865,7 +1281,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -874,6 +1290,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -891,6 +1310,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -918,7 +1340,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -946,7 +1368,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -955,6 +1377,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -976,7 +1401,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -985,6 +1410,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1006,7 +1434,7 @@ func (m *Ingress) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1015,6 +1443,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1031,6 +1462,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1058,7 +1492,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1086,7 +1520,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1096,6 +1530,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1115,7 +1552,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1124,6 +1561,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1140,6 +1580,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1167,7 +1610,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1195,7 +1638,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1204,6 +1647,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1225,7 +1671,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1234,6 +1680,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1251,6 +1700,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1278,7 +1730,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1306,7 +1758,7 @@ func (m 
*IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1316,6 +1768,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1335,7 +1790,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1344,6 +1799,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1360,6 +1818,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1387,7 +1848,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1415,7 +1876,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1424,6 +1885,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,6 +1907,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1470,7 +1937,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1498,7 +1965,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1507,6 +1974,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1531,7 +2001,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1540,6 +2010,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1562,7 +2035,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1571,6 +2044,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1588,6 +2064,9 
@@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1615,7 +2094,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1643,7 +2122,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1652,6 +2131,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1668,6 +2150,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1695,7 +2180,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1723,7 +2208,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1733,6 +2218,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1752,7 +2240,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1762,6 +2250,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1776,6 +2267,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1842,10 +2336,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1874,6 +2371,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1892,62 +2392,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 812 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, - 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, - 
0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, - 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, - 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, - 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, - 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, - 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, - 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, - 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, - 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, - 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, - 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, - 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, - 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, - 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, - 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, - 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, - 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, - 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, - 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, - 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, - 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, - 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, - 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, - 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, - 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, - 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, - 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, - 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, - 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, - 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, - 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, - 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe, - 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, - 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, - 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, - 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, - 0xd5, 0x07, 0xb2, 0x78, 
0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, - 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, - 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, - 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, - 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, - 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, - 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, - 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, - 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, - 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, - 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, - 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 7df19138..a72545c8 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -64,17 +64,17 @@ message HTTPIngressRuleValue { // based virtual hosting etc. message Ingress { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; } @@ -91,7 +91,7 @@ message IngressBackend { // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 63bf2d52..37277bf8 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -32,17 +32,17 @@ import ( type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -53,7 +53,7 @@ type Ingress struct { type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 9e05b7f1..4ae5e32d 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -48,9 +48,9 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -69,7 +69,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index 6342c985..16ac936a 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -111,7 +111,7 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go index f7f8b78b..dfe99540 100644 --- a/vendor/k8s.io/api/node/v1alpha1/doc.go +++ b/vendor/k8s.io/api/node/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=node.k8s.io diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index 16f5af92..34f4dd6d 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -17,27 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList - RuntimeClassSpec -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,27 +48,264 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} -func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } -func (*RuntimeClassSpec) ProtoMessage() {} -func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } +func (*RuntimeClassSpec) ProtoMessage() {} +func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{3} +} +func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassSpec.Merge(m, src) +} +func (m *RuntimeClassSpec) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{4} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1alpha1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1alpha1.Overhead.PodFixedEntry") proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1alpha1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1alpha1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) +} + +var fileDescriptor_82a78945ab308218 = []byte{ + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbf, 0x6f, 0xd3, 0x4e, + 0x14, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x5f, 0x7f, 0x2b, 0x14, 0x65, 0x70, 0x2a, 0x0b, + 0xa1, 0x0a, 0xa9, 0x67, 0x5a, 0xa1, 0xaa, 0x62, 0x00, 0x61, 0x7e, 0x08, 0x44, 0x69, 0xc1, 0x2d, + 0x0b, 0x62, 0xe0, 0x62, 0x3f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, 0x89, + 0x89, 0x89, 0xff, 0x06, 0xe6, 0x8e, 0x9d, 0x50, 0xa7, 0x96, 0x86, 0xff, 0x81, 0x81, 0x09, 0x9d, + 0x7d, 0x4e, 0x9c, 0xa4, 0xa1, 0x61, 0xf3, 0xdd, 0x7d, 0x7e, 0xdc, 0xfb, 0xbc, 0x7b, 0xc6, 0x77, + 0x3b, 0x3b, 0x31, 0x71, 0x99, 0xde, 0x49, 0x5a, 0x10, 0x05, 0xc0, 0x21, 0xd6, 0xbb, 0x10, 0xd8, + 0x2c, 0xd2, 0xe5, 0x01, 0x0d, 0x5d, 0x3d, 0x60, 0x36, 0xe8, 0xdd, 0x4d, 0xea, 0x85, 0x6d, 0xba, + 0xa9, 0x3b, 0x10, 0x40, 0x44, 0x39, 0xd8, 0x24, 0x8c, 0x18, 0x67, 0x4a, 0x3d, 0x43, 0x12, 0x1a, + 0xba, 0x44, 0x20, 0x49, 0x8e, 0x6c, 0x6c, 0x38, 0x2e, 0x6f, 0x27, 0x2d, 0x62, 0x31, 0x5f, 0x77, + 0x98, 0xc3, 0xf4, 0x94, 0xd0, 0x4a, 0xde, 0xa4, 0xab, 0x74, 0x91, 0x7e, 0x65, 0x42, 0x0d, 0xad, + 0x60, 0x69, 0xb1, 0x48, 0x58, 0x8e, 0x9b, 0x35, 0x6e, 0x0e, 0x31, 0x3e, 0xb5, 0xda, 0x6e, 0x00, + 0x51, 0x4f, 0x0f, 0x3b, 0x4e, 0x4a, 0x8a, 0x20, 0x66, 0x49, 0x64, 0xc1, 0x3f, 0xb1, 0x62, 0xdd, + 0x07, 0x4e, 0x2f, 0xf2, 0xd2, 0xa7, 0xb1, 0xa2, 0x24, 0xe0, 0xae, 0x3f, 0x69, 0xb3, 0x7d, 0x19, + 0x21, 0xb6, 0xda, 0xe0, 0xd3, 0x71, 0x9e, 0x76, 
0x5c, 0xc6, 0x95, 0xfd, 0x2e, 0x44, 0x6d, 0xa0, + 0xb6, 0xf2, 0x1d, 0xe1, 0x4a, 0xc8, 0xec, 0x87, 0xee, 0x3b, 0xb0, 0xeb, 0x68, 0x6d, 0x6e, 0xbd, + 0xba, 0x75, 0x83, 0x4c, 0x8b, 0x98, 0xe4, 0x34, 0xf2, 0x4c, 0x52, 0x1e, 0x04, 0x3c, 0xea, 0x19, + 0x1f, 0xd1, 0xd1, 0x69, 0xb3, 0xd4, 0x3f, 0x6d, 0x56, 0xf2, 0xfd, 0xdf, 0xa7, 0xcd, 0xe6, 0x64, + 0xbe, 0xc4, 0x94, 0x91, 0xed, 0xba, 0x31, 0xff, 0x70, 0xf6, 0x57, 0xc8, 0x1e, 0xf5, 0xe1, 0xd3, + 0x59, 0x73, 0x63, 0x96, 0x0e, 0x90, 0xe7, 0x09, 0x0d, 0xb8, 0xcb, 0x7b, 0xe6, 0xa0, 0x96, 0x46, + 0x07, 0x2f, 0x8d, 0x5c, 0x52, 0x59, 0xc1, 0x73, 0x1d, 0xe8, 0xd5, 0xd1, 0x1a, 0x5a, 0x5f, 0x34, + 0xc5, 0xa7, 0x72, 0x1f, 0x2f, 0x74, 0xa9, 0x97, 0x40, 0xbd, 0xbc, 0x86, 0xd6, 0xab, 0x5b, 0xa4, + 0x50, 0xf7, 0xc0, 0x8b, 0x84, 0x1d, 0x27, 0x0d, 0x62, 0xd2, 0x2b, 0x23, 0xdf, 0x2a, 0xef, 0x20, + 0xed, 0x1b, 0xc2, 0x35, 0x33, 0x4b, 0xfd, 0x9e, 0x47, 0xe3, 0x58, 0x79, 0x8d, 0x2b, 0xa2, 0xcf, + 0x36, 0xe5, 0x34, 0x75, 0x1c, 0x4d, 0x75, 0x42, 0x3d, 0x26, 0x02, 0x4d, 0xba, 0x9b, 0x64, 0xbf, + 0xf5, 0x16, 0x2c, 0xfe, 0x14, 0x38, 0x35, 0x14, 0x19, 0x2a, 0x1e, 0xee, 0x99, 0x03, 0x55, 0x65, + 0x17, 0xcf, 0xc7, 0x21, 0x58, 0xf2, 0xee, 0xd7, 0xa7, 0xf7, 0xac, 0x78, 0xaf, 0x83, 0x10, 0x2c, + 0xa3, 0x26, 0x75, 0xe7, 0xc5, 0xca, 0x4c, 0x55, 0xb4, 0xaf, 0x08, 0xaf, 0x14, 0x81, 0xa2, 0x41, + 0xca, 0xab, 0x89, 0x22, 0xc8, 0x6c, 0x45, 0x08, 0x76, 0x5a, 0xc2, 0x4a, 0xfe, 0x2e, 0xf2, 0x9d, + 0x42, 0x01, 0x4f, 0xf0, 0x82, 0xcb, 0xc1, 0x8f, 0xeb, 0xe5, 0xf4, 0xd5, 0x5d, 0x9b, 0xad, 0x02, + 0x63, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x33, 0xd3, 0xd0, 0x7e, 0x8d, 0xdd, 0x5f, 0x94, 0xa6, + 0xdc, 0xc6, 0xcb, 0x72, 0x14, 0x1e, 0xd1, 0xc0, 0xf6, 0x20, 0xca, 0x9a, 0x6f, 0x5c, 0x91, 0x12, + 0xcb, 0xe6, 0xc8, 0xa9, 0x39, 0x86, 0x56, 0x76, 0x71, 0x85, 0xc9, 0x07, 0x2f, 0x63, 0xd6, 0x2e, + 0x1f, 0x0d, 0xa3, 0x26, 0xea, 0xcd, 0x57, 0xe6, 0x40, 0x41, 0x39, 0xc4, 0x58, 0x0c, 0xa4, 0x9d, + 0x78, 0x6e, 0xe0, 0xd4, 0xe7, 0x52, 0xbd, 0xab, 0xd3, 0xf5, 0x0e, 0x06, 0x58, 0x63, 0x59, 0x3c, + 0x82, 0xe1, 0xda, 0x2c, 0xe8, 0x68, 0x5f, 0xca, 0xb8, 0x70, 0xa4, 0x84, 0xb8, 0x26, 0x64, 0x0e, + 0xc0, 0x03, 0x8b, 0xb3, 0x48, 0x4e, 0xf4, 0xf6, 0x2c, 0x36, 0x64, 0xaf, 0x40, 0xcc, 0xe6, 0x7a, + 0x55, 0x06, 0x55, 0x2b, 0x1e, 0x99, 0x23, 0x0e, 0xca, 0x0b, 0x5c, 0xe5, 0xcc, 0x13, 0x3f, 0x18, + 0x97, 0x05, 0x79, 0x33, 0xd5, 0xa2, 0xa1, 0x98, 0x6c, 0xf1, 0x2a, 0x0e, 0x07, 0x30, 0xe3, 0x7f, + 0x29, 0x5c, 0x1d, 0xee, 0xc5, 0x66, 0x51, 0xa7, 0x71, 0x07, 0xff, 0x37, 0x71, 0x9f, 0x0b, 0x46, + 0x78, 0xb5, 0x38, 0xc2, 0x8b, 0x85, 0x91, 0x34, 0xc8, 0xd1, 0xb9, 0x5a, 0x3a, 0x3e, 0x57, 0x4b, + 0x27, 0xe7, 0x6a, 0xe9, 0x7d, 0x5f, 0x45, 0x47, 0x7d, 0x15, 0x1d, 0xf7, 0x55, 0x74, 0xd2, 0x57, + 0xd1, 0x8f, 0xbe, 0x8a, 0x3e, 0xff, 0x54, 0x4b, 0x2f, 0x2b, 0x79, 0x10, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xa8, 0x77, 0xef, 0x80, 0x9b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + 
for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -78,33 +313,42 @@ func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -112,65 +356,188 @@ func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m 
*RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { +func (m *Scheduling) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) - i += copy(dAtA[i:], m.RuntimeHandler) - return i, nil + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } + func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -181,6 +548,9 @@ func (m *RuntimeClass) Size() (n int) { 
} func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -195,32 +565,79 @@ func (m *RuntimeClassList) Size() (n int) { } func (m *RuntimeClassSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.RuntimeHandler) n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} func (this *RuntimeClass) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -230,9 +647,14 @@ func (this *RuntimeClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -241,19 +663,229 @@ func (this *RuntimeClassSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RuntimeClassSpec{`, - `RuntimeHandler:` + 
fmt.Sprintf("%v", this.RuntimeHandler) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&RuntimeClassSpec{`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *RuntimeClass) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -270,7 +902,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -298,7 +930,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -307,6 +939,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -328,7 +963,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -337,6 +972,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -353,6 +991,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -380,7 +1021,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) 
<< shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -408,7 +1049,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -417,6 +1058,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -438,7 +1082,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -447,6 +1091,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -464,6 +1111,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -491,7 +1141,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -519,7 +1169,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -529,11 +1179,300 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 
{ + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -543,6 +1482,9 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -609,10 +1551,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -641,6 +1586,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -659,38 +1607,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 421 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x6b, 0xd4, 0x40, - 0x14, 0xc7, 0x33, 0xb5, 0x85, 0x75, 0x5a, 0x4b, 0xc9, 0x41, 0xc2, 0x1e, 0xa6, 0x65, 0x0f, 0x52, - 0x04, 0x67, 0xdc, 0x22, 0xe2, 0x49, 0x30, 0x5e, 0x14, 0x2b, 0x42, 0xbc, 0x89, 0x07, 0x27, 0xc9, - 0x33, 0x19, 0xb3, 0xc9, 0x0c, 0x99, 0x49, 0xc0, 0x9b, 0x1f, 0xc1, 0x2f, 0xa4, 0xe7, 0x3d, 0xf6, - 0xd8, 0x53, 0x71, 0xe3, 0x17, 0x91, 0x99, 0x64, 0xbb, 0xdb, 0x2e, 0xc5, 0xbd, 0xe5, 0xbd, 0xf9, - 0xff, 0x7f, 0xef, 0xfd, 0x5f, 0xf0, 0xab, 0xe2, 0x85, 0xa6, 0x42, 0xb2, 0xa2, 0x89, 0xa1, 0xae, - 0xc0, 0x80, 0x66, 0x2d, 0x54, 0xa9, 0xac, 0xd9, 0xf0, 0xc0, 0x95, 0x60, 0x95, 0x4c, 0x81, 0xb5, - 0x53, 0x3e, 0x53, 0x39, 0x9f, 0xb2, 0x0c, 0x2a, 0xa8, 0xb9, 0x81, 0x94, 0xaa, 0x5a, 0x1a, 0xe9, - 0x07, 0xbd, 0x92, 0x72, 0x25, 0xa8, 0x55, 0xd2, 0xa5, 0x72, 0xfc, 0x24, 0x13, 0x26, 0x6f, 0x62, - 0x9a, 0xc8, 0x92, 0x65, 0x32, 0x93, 0xcc, 0x19, 0xe2, 0xe6, 0xab, 0xab, 0x5c, 0xe1, 0xbe, 0x7a, - 0xd0, 0xf8, 0xd9, 0x6a, 0x64, 0xc9, 0x93, 0x5c, 0x54, 0x50, 0x7f, 0x67, 0xaa, 0xc8, 0x6c, 0x43, - 0xb3, 0x12, 0x0c, 0x67, 0xed, 0xc6, 0xf8, 0x31, 0xbb, 0xcb, 0x55, 0x37, 0x95, 0x11, 0x25, 0x6c, - 0x18, 0x9e, 0xff, 0xcf, 0xa0, 0x93, 0x1c, 0x4a, 0x7e, 0xdb, 0x37, 0xf9, 0x8d, 0xf0, 0x41, 0xd4, - 0x4b, 0x5e, 0xcf, 0xb8, 0xd6, 0xfe, 0x17, 0x3c, 0xb2, 0x4b, 0xa5, 0xdc, 0xf0, 0x00, 0x9d, 0xa0, - 0xd3, 0xfd, 0xb3, 0xa7, 0x74, 0x75, 
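// Editorial sketch, not part of the vendored file: every Unmarshal in this regenerated
// code repeats the same base-128 varint loop (`wire |= uint64(b&0x7F) << shift`), and the
// regeneration adds `postIndex < 0` / `(iNdEx + skippy) < 0` guards so a corrupt length
// can no longer wrap the index negative. A standalone version of the same pattern,
// assuming the file's existing "fmt" and "io" imports:
func decodeVarint(dAtA []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("proto: integer overflow") // more than 10 continuation bytes
		}
		if n >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF // truncated input
		}
		b := dAtA[n]
		n++
		v |= uint64(b&0x7F) << shift // low 7 bits carry payload
		if b < 0x80 {                // a clear high bit terminates the varint
			return v, n, nil
		}
	}
}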
0x8b, 0x6b, 0x36, 0x55, 0x45, 0x66, 0x1b, 0x9a, 0x5a, 0x35, - 0x6d, 0xa7, 0xf4, 0x43, 0xfc, 0x0d, 0x12, 0xf3, 0x1e, 0x0c, 0x0f, 0xfd, 0xf9, 0xd5, 0xb1, 0xd7, - 0x5d, 0x1d, 0xe3, 0x55, 0x2f, 0xba, 0xa6, 0xfa, 0xe7, 0x78, 0x57, 0x2b, 0x48, 0x82, 0x1d, 0x47, - 0x7f, 0x4c, 0xef, 0xba, 0x34, 0x5d, 0xdf, 0xeb, 0xa3, 0x82, 0x24, 0x3c, 0x18, 0xb8, 0xbb, 0xb6, - 0x8a, 0x1c, 0x65, 0xf2, 0x0b, 0xe1, 0xa3, 0x75, 0xe1, 0xb9, 0xd0, 0xc6, 0xff, 0xbc, 0x11, 0x82, - 0x6e, 0x17, 0xc2, 0xba, 0x5d, 0x84, 0xa3, 0x61, 0xd4, 0x68, 0xd9, 0x59, 0x0b, 0xf0, 0x0e, 0xef, - 0x09, 0x03, 0xa5, 0x0e, 0x76, 0x4e, 0xee, 0x9d, 0xee, 0x9f, 0x3d, 0xda, 0x2e, 0x41, 0xf8, 0x60, - 0x40, 0xee, 0xbd, 0xb5, 0xe6, 0xa8, 0x67, 0x4c, 0xa2, 0x9b, 0xeb, 0xdb, 0x64, 0xfe, 0x4b, 0x7c, - 0x38, 0xfc, 0xb6, 0x37, 0xbc, 0x4a, 0x67, 0x50, 0xbb, 0x10, 0xf7, 0xc3, 0x87, 0x03, 0xe1, 0x30, - 0xba, 0xf1, 0x1a, 0xdd, 0x52, 0x87, 0x74, 0xbe, 0x20, 0xde, 0xc5, 0x82, 0x78, 0x97, 0x0b, 0xe2, - 0xfd, 0xe8, 0x08, 0x9a, 0x77, 0x04, 0x5d, 0x74, 0x04, 0x5d, 0x76, 0x04, 0xfd, 0xe9, 0x08, 0xfa, - 0xf9, 0x97, 0x78, 0x9f, 0x46, 0xcb, 0x35, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x94, 0x34, 0x0e, - 0xef, 0x30, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index ca4e5e53..ac05f839 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -21,6 +21,8 @@ syntax = 'proto2'; package k8s.io.api.node.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,6 +30,13 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map podFixed = 1; +} + // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run // all containers in a pod. RuntimeClasses are (currently) manually defined by a @@ -36,19 +45,19 @@ option go_package = "v1alpha1"; // pod. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md message RuntimeClass { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } // RuntimeClassList is a list of RuntimeClass objects. message RuntimeClassList { // Standard list metadata. 
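// Editorial worked example (hypothetical numbers), not part of the vendored proto: with the
// alpha PodOverhead feature enabled, the amounts in podFixed are added on top of the sum of
// a pod's container requests when the pod is scheduled. For containers requesting 500m CPU
// and 256Mi memory under a RuntimeClass whose overhead declares podFixed = {cpu: 250m,
// memory: 120Mi}, the scheduler accounts for:
//   cpu:    500m  + 250m  = 750m
//   memory: 256Mi + 120Mi = 376Mi
// In the Go types later in this patch, podFixed surfaces as Overhead.PodFixed, a
// k8s.io/api/core/v1.ResourceList keyed by ResourceName.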
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -72,5 +81,38 @@ message RuntimeClassSpec { // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements // and is immutable. optional string runtimeHandler = 1; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 2; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 3; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 2ce67c11..b5976710 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md type RuntimeClass struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -58,6 +59,46 @@ type RuntimeClassSpec struct { // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements // and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. 
For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -66,7 +107,7 @@ type RuntimeClassSpec struct { type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index a51fa525..39000017 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -27,10 +27,19 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. 
The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -39,7 +48,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -50,10 +59,22 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { return map_RuntimeClassSpec } +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go index 0d63110b..20f80518 100644 --- a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -21,15 +21,39 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) return } @@ -55,7 +79,7 @@ func (in *RuntimeClass) DeepCopyObject() runtime.Object { func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RuntimeClass, len(*in)) @@ -87,6 +111,16 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { *out = *in + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } @@ -99,3 +133,33 @@ func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. 
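// Editorial sketch, not part of the generated deep-copy file: why this regeneration switches
// RuntimeClass.DeepCopyInto from `out.Spec = in.Spec` to `in.Spec.DeepCopyInto(&out.Spec)`.
// Spec now carries pointers and a map, and assigning a struct that contains a map copies only
// the map header, so both copies alias the same entries; the generated DeepCopy allocates a
// fresh map and clones each Quantity. Assumes imports nodev1 "k8s.io/api/node/v1alpha1",
// corev1 "k8s.io/api/core/v1", resource "k8s.io/apimachinery/pkg/api/resource".
a := nodev1.Overhead{PodFixed: corev1.ResourceList{
	corev1.ResourceCPU: resource.MustParse("100m"),
}}
b := a                                                   // shallow copy: b.PodFixed aliases a.PodFixed
b.PodFixed[corev1.ResourceCPU] = resource.MustParse("1") // also observed through a
c := a.DeepCopy()                                        // fresh map with copied Quantities; safe to mutate
_ = c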
+func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go index 0e8338cf..e87583ce 100644 --- a/vendor/k8s.io/api/node/v1beta1/doc.go +++ b/vendor/k8s.io/api/node/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=node.k8s.io diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index 27251a8a..63992f43 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -17,26 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +48,233 @@ var _ = math.Inf // proto package needs to be updated. 
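// Editorial sketch, not part of the vendored file: what a RuntimeClass using the Overhead and
// Scheduling fields added earlier in this patch looks like when built from the node/v1alpha1
// Go types. The handler name, label and quantities are made up for illustration; assumes the
// usual imports nodev1 "k8s.io/api/node/v1alpha1", corev1 "k8s.io/api/core/v1",
// resource "k8s.io/apimachinery/pkg/api/resource", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
rc := nodev1.RuntimeClass{
	ObjectMeta: metav1.ObjectMeta{Name: "kata"},
	Spec: nodev1.RuntimeClassSpec{
		RuntimeHandler: "kata", // CRI handler name, unchanged by this patch
		Overhead: &nodev1.Overhead{ // fixed per-pod cost of running under this runtime
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			},
		},
		Scheduling: &nodev1.Scheduling{ // keep pods on nodes that actually have the runtime
			NodeSelector: map[string]string{"runtime": "kata"},
			Tolerations: []corev1.Toleration{
				{Key: "runtime", Operator: corev1.TolerationOpExists},
			},
		},
	},
}
_ = rc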
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return 
m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1beta1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1beta1.Overhead.PodFixedEntry") proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1beta1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1beta1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) } + +var fileDescriptor_f977b0dddc93b4ec = []byte{ + // 666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbd, 0x6f, 0xd3, 0x40, + 0x14, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0x89, 0x28, 0x83, 0x53, 0x82, 0x90, 0xca, + 0xd0, 0x33, 0xad, 0x00, 0x55, 0x2c, 0x20, 0xf3, 0x21, 0x3e, 0x5b, 0x70, 0x61, 0x41, 0x0c, 0x5c, + 0xec, 0x87, 0x63, 0x12, 0xfb, 0xa2, 0xf3, 0x39, 0x22, 0x1b, 0x62, 0x41, 0x62, 0x62, 0xe1, 0xbf, + 0x81, 0xbd, 0x1b, 0x5d, 0x90, 0x3a, 0xb5, 0x34, 0xfc, 0x17, 0x4c, 0xe8, 0xec, 0x73, 0x72, 0x6d, + 0x9a, 0xb6, 0x6c, 0xbe, 0xf3, 0xef, 0xe3, 0xbd, 0xdf, 0xbb, 0x87, 0xef, 0xb4, 0xd7, 0x62, 0x12, + 0x30, 0xab, 0x9d, 0x34, 0x81, 0x47, 0x20, 0x20, 0xb6, 0x7a, 0x10, 0x79, 0x8c, 0x5b, 0xea, 0x07, + 0xed, 0x06, 0x56, 0xc4, 0x3c, 0xb0, 0x7a, 0x2b, 0x4d, 0x10, 0x74, 0xc5, 0xf2, 0x21, 0x02, 0x4e, + 0x05, 0x78, 0xa4, 0xcb, 0x99, 0x60, 0xc6, 0xc5, 0x0c, 0x48, 0x68, 0x37, 0x20, 0x12, 0x48, 0x14, + 0xb0, 0xb6, 0xec, 0x07, 0xa2, 0x95, 0x34, 0x89, 0xcb, 0x42, 0xcb, 0x67, 0x3e, 0xb3, 0x52, 0x7c, + 0x33, 0x79, 0x97, 0x9e, 0xd2, 0x43, 0xfa, 0x95, 0xe9, 0xd4, 0x1a, 0x9a, 0xa1, 0xcb, 0xb8, 0x34, + 0x3c, 0xec, 0x55, 0xbb, 0x3e, 0xc2, 0x84, 0xd4, 0x6d, 0x05, 0x11, 0xf0, 0xbe, 0xd5, 0x6d, 0xfb, + 0x29, 0x89, 0x43, 0xcc, 0x12, 0xee, 0xc2, 0x7f, 0xb1, 0x62, 0x2b, 0x04, 0x41, 0x8f, 0xf2, 0xb2, + 0x26, 0xb1, 0x78, 0x12, 0x89, 0x20, 0x1c, 0xb7, 0xb9, 0x79, 0x12, 0x21, 0x76, 0x5b, 0x10, 0xd2, + 0xc3, 0xbc, 0xc6, 0xcf, 0x22, 0x2e, 0x6d, 0xf4, 0x80, 0xb7, 0x80, 0x7a, 0xc6, 0x2f, 0x84, 0x4b, + 0x5d, 0xe6, 0x3d, 0x08, 0x3e, 0x80, 0x57, 0x45, 0x8b, 0x53, 0x4b, 0xe5, 0x55, 0x8b, 0x4c, 0x48, + 0x98, 0xe4, 0x2c, 0xf2, 0x5c, 0x31, 0xee, 0x47, 0x82, 0xf7, 0xed, 0xcf, 0x68, 0x6b, 0xb7, 0x5e, + 0x18, 0xec, 0xd6, 0x4b, 0xf9, 0xfd, 0xdf, 0xdd, 0x7a, 0x7d, 0x3c, 0x5e, 0xe2, 0xa8, 0xc4, 0x9e, + 0x06, 0xb1, 0xf8, 0xb4, 0x77, 0x2c, 0x64, 0x9d, 0x86, 0xf0, 0x65, 0xaf, 0xbe, 0x7c, 0x9a, 0x01, + 0x90, 0x17, 0x09, 0x8d, 0x44, 0x20, 0xfa, 0xce, 0xb0, 0x95, 0x5a, 0x1b, 0xcf, 0x1d, 0x28, 0xd2, + 0x98, 0xc7, 0x53, 0x6d, 0xe8, 0x57, 0xd1, 0x22, 0x5a, 0x9a, 0x75, 0xe4, 0xa7, 0x71, 0x0f, 0x4f, + 0xf7, 0x68, 0x27, 0x81, 0x6a, 0x71, 0x11, 0x2d, 0x95, 0x57, 0x89, 0xd6, 0xf6, 0xd0, 0x8b, 0x74, + 0xdb, 0x7e, 0x9a, 0xc3, 0xb8, 0x57, 0x46, 0xbe, 0x55, 0x5c, 0x43, 0x8d, 0x1f, 0x45, 0x5c, 0x71, + 0xb2, 0xd0, 0xef, 0x76, 0x68, 0x1c, 0x1b, 0x6f, 0x71, 0x49, 0x8e, 0xd9, 0xa3, 0x82, 0xa6, 0x8e, + 0xe5, 0xd5, 0x6b, 0xc7, 0xa9, 0xc7, 0x44, 0xa2, 0x49, 0x6f, 0x85, 0x6c, 0x34, 0xdf, 0x83, 0x2b, + 0x9e, 0x81, 0xa0, 0xb6, 0xa1, 0x42, 0xc5, 0xa3, 0x3b, 0x67, 0xa8, 0x6a, 0x5c, 0xc5, 0x33, 0x2d, + 0x1a, 0x79, 0x1d, 0xe0, 0x69, 0xf9, 
0xb3, 0xf6, 0x39, 0x05, 0x9f, 0x79, 0x98, 0x5d, 0x3b, 0xf9, + 0x7f, 0xe3, 0x09, 0x2e, 0x31, 0x35, 0xb8, 0xea, 0x54, 0x5a, 0xcc, 0xa5, 0x13, 0x27, 0x6c, 0x57, + 0xe4, 0x38, 0xf3, 0x93, 0x33, 0x14, 0x30, 0x36, 0x31, 0x96, 0xcf, 0xca, 0x4b, 0x3a, 0x41, 0xe4, + 0x57, 0xcf, 0xa4, 0x72, 0x97, 0x27, 0xca, 0x6d, 0x0e, 0xa1, 0xf6, 0x59, 0xd9, 0xca, 0xe8, 0xec, + 0x68, 0x32, 0x8d, 0xef, 0x08, 0xcf, 0xeb, 0xf9, 0xc9, 0xf7, 0x61, 0xbc, 0x19, 0xcb, 0x90, 0x9c, + 0x2e, 0x43, 0xc9, 0x4e, 0x13, 0x9c, 0xcf, 0x9f, 0x65, 0x7e, 0xa3, 0xe5, 0xf7, 0x18, 0x4f, 0x07, + 0x02, 0xc2, 0xb8, 0x5a, 0x4c, 0xdf, 0xfc, 0x95, 0x89, 0x2d, 0xe8, 0x75, 0xd9, 0x73, 0x4a, 0x71, + 0xfa, 0x91, 0xe4, 0x3a, 0x99, 0x44, 0xe3, 0x5b, 0x11, 0x6b, 0x9d, 0x19, 0x0c, 0x57, 0xa4, 0xc2, + 0x26, 0x74, 0xc0, 0x15, 0x8c, 0xab, 0xad, 0xba, 0x71, 0x8a, 0x90, 0xc8, 0xba, 0xc6, 0xcb, 0x76, + 0x6b, 0x41, 0x39, 0x56, 0xf4, 0x5f, 0xce, 0x01, 0x03, 0xe3, 0x15, 0x2e, 0x0b, 0xd6, 0x91, 0x3b, + 0x1e, 0xb0, 0x28, 0xef, 0xc8, 0xd4, 0xfd, 0xe4, 0x76, 0xc9, 0x68, 0x5e, 0x0e, 0x61, 0xf6, 0x05, + 0x25, 0x5c, 0x1e, 0xdd, 0xc5, 0x8e, 0xae, 0x53, 0xbb, 0x8d, 0xcf, 0x8f, 0xd5, 0x73, 0xc4, 0x1a, + 0x2d, 0xe8, 0x6b, 0x34, 0xab, 0xad, 0x85, 0xbd, 0xbc, 0xb5, 0x6f, 0x16, 0xb6, 0xf7, 0xcd, 0xc2, + 0xce, 0xbe, 0x59, 0xf8, 0x38, 0x30, 0xd1, 0xd6, 0xc0, 0x44, 0xdb, 0x03, 0x13, 0xed, 0x0c, 0x4c, + 0xf4, 0x7b, 0x60, 0xa2, 0xaf, 0x7f, 0xcc, 0xc2, 0xeb, 0x19, 0x95, 0xc3, 0xbf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x5b, 0xcf, 0x13, 0x0c, 0x1b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,29 +282,61 @@ func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, 
err + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n1 - dAtA[i] = 0x12 - i++ + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) - i += copy(dAtA[i:], m.Handler) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -102,53 +344,157 @@ func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + 
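// Editorial sketch, not part of the vendored file: the regenerated marshalers fill an exactly
// presized buffer from the tail. Size() computes the length up front, MarshalToSizedBuffer
// writes backwards and returns how many bytes it wrote, and encodeVarintGenerated now returns
// the offset at which it placed the varint instead of advancing a forward cursor. A caller-side
// view of the same flow, using the Overhead message from this file:
func marshalOverhead(m *Overhead) ([]byte, error) {
	buf := make([]byte, m.Size()) // exact size up front
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[len(buf)-n:], nil // written bytes occupy the tail; here n == len(buf)
}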
dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = len(m.Handler) n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -162,26 +508,64 @@ func (m *RuntimeClassList) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} func (this *RuntimeClass) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + 
strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, `}`, }, "") return s @@ -190,9 +574,40 @@ func (this *RuntimeClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, `}`, }, "") return s @@ -205,6 +620,188 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RuntimeClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -220,7 +817,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -248,7 +845,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -257,6 +854,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -278,7 +878,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -288,11 +888,86 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Handler = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -302,6 +977,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -329,7 +1007,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -357,7 +1035,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -366,6 +1044,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -387,7 +1068,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -396,6 +1077,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -413,6 +1097,223 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -479,10 +1380,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -511,6 +1415,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -529,36 +1436,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 389 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6a, 0xdb, 0x40, - 0x14, 0x85, 0x35, 0x2e, 0xc6, 0xae, 0xdc, 0x52, 0xa3, 0x4d, 0x8d, 0x17, 0x63, 0x63, 0x28, 0xb8, - 0x0b, 0xcf, 0xd4, 0xa6, 0x94, 0x2e, 0x8b, 0xba, 0x69, 0x4b, 0x4b, 0x41, 0xcb, 0x90, 0x45, 0x46, - 0xd2, 0x8d, 0x34, 0x91, 0xa5, 0x11, 0x9a, 0x91, 0x20, 0xbb, 0x3c, 0x42, 0xf6, 0x79, 0x95, 0x3c, - 0x80, 0x97, 0x5e, 0x7a, 0x65, 0x62, 0xe5, 0x45, 0x82, 0x7e, 0xfc, 0x43, 0x8c, 0x49, 0x76, 0xba, - 0xe7, 0x9e, 0x73, 0xee, 0x87, 0x18, 0xfd, 0x47, 0xf0, 0x5d, 0x12, 0x2e, 0x68, 0x90, 0xda, 0x90, - 0x44, 0xa0, 0x40, 0xd2, 0x0c, 0x22, 0x57, 0x24, 0xb4, 0x5e, 0xb0, 0x98, 0xd3, 0x48, 0xb8, 0x40, - 0xb3, 0xa9, 0x0d, 0x8a, 0x4d, 0xa9, 0x07, 0x11, 0x24, 0x4c, 0x81, 0x4b, 0xe2, 0x44, 0x28, 0x61, - 0x7c, 0xac, 0x8c, 0x84, 0xc5, 0x9c, 0x14, 0x46, 0x52, 0x1b, 0xfb, 0x13, 0x8f, 0x2b, 0x3f, 0xb5, - 0x89, 0x23, 0x42, 0xea, 0x09, 0x4f, 0xd0, 0xd2, 0x6f, 0xa7, 0x97, 0xe5, 0x54, 0x0e, 0xe5, 0x57, - 0xd5, 0xd3, 0xff, 0xba, 0x3f, 0x18, 0x32, 0xc7, 0xe7, 0x11, 0x24, 0xd7, 0x34, 0x0e, 0xbc, 0x42, - 0x90, 0x34, 0x04, 0xc5, 0x68, 0x76, 0x74, 0xbd, 0x4f, 0x4f, 0xa5, 0x92, 0x34, 0x52, 0x3c, 0x84, - 0xa3, 0xc0, 0xb7, 0x97, 0x02, 0xd2, 0xf1, 0x21, 0x64, 0xcf, 0x73, 0xa3, 0x3b, 0xa4, 0xbf, 0xb3, - 0x2a, 0xcb, 0xcf, 0x39, 0x93, 0xd2, 0xb8, 0xd0, 0xdb, 0x05, 0x94, 0xcb, 0x14, 0xeb, 0xa1, 0x21, - 0x1a, 0x77, 0x66, 0x5f, 0xc8, 0xfe, 0x57, 0xec, 0xba, 0x49, 0x1c, 0x78, 0x85, 0x20, 0x49, 0xe1, - 0x26, 0xd9, 0x94, 0xfc, 0xb7, 0xaf, 0xc0, 0x51, 0xff, 0x40, 0x31, 0xd3, 0x58, 0xac, 0x07, 0x5a, - 0xbe, 0x1e, 0xe8, 0x7b, 0xcd, 0xda, 0xb5, 0x1a, 0x9f, 0xf5, 0x96, 0xcf, 0x22, 0x77, 0x0e, 0x49, - 0xaf, 0x31, 0x44, 0xe3, 0xb7, 0xe6, 0x87, 0xda, 0xde, 0xfa, 0x55, 0xc9, 0xd6, 0x76, 0x3f, 0xba, - 0x47, 0x7a, 0xf7, 0x90, 0xee, 0x2f, 0x97, 0xca, 0x38, 0x3f, 0x22, 0x24, 0xaf, 0x23, 0x2c, 0xd2, - 0x25, 0x5f, 0xb7, 0x3e, 0xd8, 0xde, 0x2a, 0x07, 0x74, 0x7f, 0xf4, 0x26, 0x57, 0x10, 0xca, 0x5e, - 0x63, 0xf8, 0x66, 0xdc, 0x99, 
0x7d, 0x22, 0x27, 0xde, 0x01, 0x39, 0xe4, 0x32, 0xdf, 0xd7, 0x8d,
-	0xcd, 0xdf, 0x45, 0xd6, 0xaa, 0x2a, 0xcc, 0xc9, 0x62, 0x83, 0xb5, 0xe5, 0x06, 0x6b, 0xab, 0x0d,
-	0xd6, 0x6e, 0x72, 0x8c, 0x16, 0x39, 0x46, 0xcb, 0x1c, 0xa3, 0x55, 0x8e, 0xd1, 0x43, 0x8e, 0xd1,
-	0xed, 0x23, 0xd6, 0xce, 0x5a, 0x75, 0xe3, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x93, 0x68, 0xe5,
-	0x0d, 0xb5, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto
index 9082fbd3..49166798 100644
--- a/vendor/k8s.io/api/node/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/node/v1beta1/generated.proto
@@ -21,6 +21,8 @@ syntax = 'proto2';
 
 package k8s.io.api.node.v1beta1;
 
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
 import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
 import "k8s.io/apimachinery/pkg/runtime/generated.proto";
 import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
@@ -28,6 +30,13 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
 // Package-wide variables from generator "generated".
 option go_package = "v1beta1";
 
+// Overhead structure represents the resource overhead associated with running a pod.
+message Overhead {
+  // PodFixed represents the fixed resource overhead associated with running a pod.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
+}
+
 // RuntimeClass defines a class of container runtime supported in the cluster.
 // The RuntimeClass is used to determine which container runtime is used to run
 // all containers in a pod. RuntimeClasses are (currently) manually defined by a
@@ -36,7 +45,7 @@ option go_package = "v1beta1";
 // pod. For more details, see
 // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
 message RuntimeClass {
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
 
@@ -51,12 +60,26 @@ message RuntimeClass {
   // The Handler must conform to the DNS Label (RFC 1123) requirements, and is
   // immutable.
   optional string handler = 2;
+
+  // Overhead represents the resource overhead associated with running a pod for a
+  // given RuntimeClass. For more details, see
+  // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
+  // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
+  // +optional
+  optional Overhead overhead = 3;
+
+  // Scheduling holds the scheduling constraints to ensure that pods running
+  // with this RuntimeClass are scheduled to nodes that support it.
+  // If scheduling is nil, this RuntimeClass is assumed to be supported by all
+  // nodes.
+  // +optional
+  optional Scheduling scheduling = 4;
 }
 
 // RuntimeClassList is a list of RuntimeClass objects.
 message RuntimeClassList {
   // Standard list metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
@@ -64,3 +87,22 @@ message RuntimeClassList {
   repeated RuntimeClass items = 2;
 }
 
+// Scheduling specifies the scheduling constraints for nodes supporting a
+// RuntimeClass.
+message Scheduling {
+  // nodeSelector lists labels that must be present on nodes that support this
+  // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+  // node matched by this selector. The RuntimeClass nodeSelector is merged
+  // with a pod's existing nodeSelector. Any conflicts will cause the pod to
+  // be rejected in admission.
+  // +optional
+  map<string, string> nodeSelector = 1;
+
+  // tolerations are appended (excluding duplicates) to pods running with this
+  // RuntimeClass during admission, effectively unioning the set of nodes
+  // tolerated by the pod and the RuntimeClass.
+  // +optional
+  // +listType=atomic
+  repeated k8s.io.api.core.v1.Toleration tolerations = 2;
+}
+
diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go
index 993c6e50..793a48f6 100644
--- a/vendor/k8s.io/api/node/v1beta1/types.go
+++ b/vendor/k8s.io/api/node/v1beta1/types.go
@@ -17,6 +17,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -33,7 +34,7 @@ import (
 // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
 type RuntimeClass struct {
 	metav1.TypeMeta `json:",inline"`
-	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	// +optional
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
@@ -48,6 +49,46 @@ type RuntimeClass struct {
 	// The Handler must conform to the DNS Label (RFC 1123) requirements, and is
 	// immutable.
 	Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"`
+
+	// Overhead represents the resource overhead associated with running a pod for a
+	// given RuntimeClass. For more details, see
+	// https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
+	// This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
+	// +optional
+	Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"`
+
+	// Scheduling holds the scheduling constraints to ensure that pods running
+	// with this RuntimeClass are scheduled to nodes that support it.
+	// If scheduling is nil, this RuntimeClass is assumed to be supported by all
+	// nodes.
+	// +optional
+	Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"`
+}
+
+// Overhead structure represents the resource overhead associated with running a pod.
+type Overhead struct {
+	// PodFixed represents the fixed resource overhead associated with running a pod.
+	// +optional
+	PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"`
+}
+
+// Scheduling specifies the scheduling constraints for nodes supporting a
+// RuntimeClass.
+type Scheduling struct {
+	// nodeSelector lists labels that must be present on nodes that support this
+	// RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+	// node matched by this selector. The RuntimeClass nodeSelector is merged
+	// with a pod's existing nodeSelector. Any conflicts will cause the pod to
+	// be rejected in admission.
+ // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -56,7 +97,7 @@ type RuntimeClass struct { type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index 8bfa304e..681f73f2 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -27,10 +27,21 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -39,7 +50,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -47,4 +58,14 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { return map_RuntimeClassList } +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go index 98b5d480..c3989528 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -21,14 +21,48 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } @@ -54,7 +88,7 @@ func (in *RuntimeClass) DeepCopyObject() runtime.Object { func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RuntimeClass, len(*in)) @@ -82,3 +116,33 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index deeeac69..5b57f699 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -17,49 +17,26 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto - - It has these top-level messages: - AllowedCSIDriver - AllowedFlexVolume - AllowedHostPath - Eviction - FSGroupStrategyOptions - HostPortRange - IDRange - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - RunAsGroupStrategyOptions - RunAsUserStrategyOptions - SELinuxStrategyOptions - SupplementalGroupsStrategyOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -72,85 +49,537 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var 
xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{3} +} +func (m *Eviction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Eviction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Eviction.Merge(m, src) +} +func (m *Eviction) XXX_Size() int { + return m.Size() +} +func (m *Eviction) XXX_DiscardUnknown() { + xxx_messageInfo_Eviction.DiscardUnknown(m) +} + +var xxx_messageInfo_Eviction proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{4} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{5} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{6} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} -func (m *Eviction) Reset() { *m = Eviction{} } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_IDRange proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{7} +} +func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudget.Merge(m, src) +} +func (m *PodDisruptionBudget) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudget) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m) +} -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{8} +} +func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src) +} +func (m *PodDisruptionBudgetList) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m) +} -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{9} +} +func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src) +} +func (m *PodDisruptionBudgetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m) +} -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_014060e454a820dc, []int{10} +} +func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src) +} +func (m *PodDisruptionBudgetStatus) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{11} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{12} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) } -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{13} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_014060e454a820dc, []int{14} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_014060e454a820dc, []int{15} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{16} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{17} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return 
fileDescriptorGenerated, []int{17} + return fileDescriptor_014060e454a820dc, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo func init() { proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") @@ -164,18 +593,147 @@ func init() { proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") + proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) +} + +var fileDescriptor_014060e454a820dc = []byte{ + // 1883 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x35, 0xd7, 0xd9, 0x00, + 0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59, + 0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59, + 0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e, + 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0x2c, 0xfa, 0x12, 0xbe, 0x0a, 0x76, 0x38, 0xbb, 0xe4, + 0xfe, 0x91, 0x76, 0x00, 0xfb, 0x8e, 0x3b, 0xe7, 0xfb, 0xbe, 0x33, 0x73, 0xe6, 0xcc, 0x99, 0xc3, + 0x01, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad, + 0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd, + 0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7, + 0xe0, 0xf5, 0x11, 
0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9, + 0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0xf8, + 0x12, 0x1f, 0xe2, 0xd7, 0x48, 0x69, 0x57, 0x9f, 0x70, 0xda, 0xf6, 0x28, 0xa9, 0xf5, 0x32, 0xde, + 0x76, 0xef, 0x8e, 0x31, 0x5d, 0xdc, 0x3e, 0xb7, 0x5d, 0x42, 0xfb, 0x35, 0xff, 0xa2, 0x13, 0x0e, + 0xb0, 0x5a, 0x97, 0x70, 0x9c, 0xc7, 0xaa, 0x15, 0xb1, 0x68, 0xe0, 0x72, 0xbb, 0x4b, 0x32, 0x84, + 0x7b, 0xb3, 0x08, 0xac, 0x7d, 0x4e, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0xb8, 0xed, 0xd4, + 0x6c, 0x97, 0x33, 0x4e, 0xd3, 0x24, 0xfd, 0x2e, 0xd8, 0xd8, 0x77, 0x1c, 0xef, 0x4b, 0x62, 0x1d, + 0x34, 0x8f, 0xeb, 0xd4, 0xee, 0x11, 0x0a, 0x6f, 0x82, 0x79, 0x17, 0x77, 0x89, 0xaa, 0xdc, 0x54, + 0x6e, 0x2d, 0x9b, 0xab, 0xcf, 0x07, 0xda, 0xdc, 0x70, 0xa0, 0xcd, 0x3f, 0xc6, 0x5d, 0x82, 0x84, + 0x45, 0xff, 0x04, 0x54, 0x24, 0xeb, 0xc8, 0x21, 0x97, 0x9f, 0x7b, 0x4e, 0xd0, 0x25, 0xf0, 0xc7, + 0x60, 0xd1, 0x12, 0x02, 0x92, 0xb8, 0x2e, 0x89, 0x8b, 0x23, 0x59, 0x24, 0xad, 0x3a, 0x03, 0x57, + 0x25, 0xf9, 0xa1, 0xc7, 0x78, 0x03, 0xf3, 0x73, 0xb8, 0x07, 0x80, 0x8f, 0xf9, 0x79, 0x83, 0x92, + 0x33, 0xfb, 0x52, 0xd2, 0xa1, 0xa4, 0x83, 0x46, 0x6c, 0x41, 0x13, 0x28, 0xf8, 0x01, 0x28, 0x53, + 0x82, 0xad, 0x53, 0xd7, 0xe9, 0xab, 0x57, 0x6e, 0x2a, 0xb7, 0xca, 0xe6, 0x86, 0x64, 0x94, 0x91, + 0x1c, 0x47, 0x31, 0x42, 0xff, 0x8f, 0x02, 0xca, 0x87, 0x3d, 0xbb, 0xcd, 0x6d, 0xcf, 0x85, 0x7f, + 0x04, 0xe5, 0x70, 0xb7, 0x2c, 0xcc, 0xb1, 0x70, 0xb6, 0xb2, 0xf7, 0x91, 0x31, 0xce, 0xa4, 0x38, + 0x78, 0x86, 0x7f, 0xd1, 0x09, 0x07, 0x98, 0x11, 0xa2, 0x8d, 0xde, 0x6d, 0xe3, 0xb4, 0xf5, 0x05, + 0x69, 0xf3, 0x13, 0xc2, 0xf1, 0x78, 0x7a, 0xe3, 0x31, 0x14, 0xab, 0x42, 0x07, 0xac, 0x59, 0xc4, + 0x21, 0x9c, 0x9c, 0xfa, 0xa1, 0x47, 0x26, 0x66, 0xb8, 0xb2, 0x77, 0xe7, 0xd5, 0xdc, 0xd4, 0x27, + 0xa9, 0x66, 0x65, 0x38, 0xd0, 0xd6, 0x12, 0x43, 0x28, 0x29, 0xae, 0x7f, 0xa5, 0x80, 0x9d, 0xa3, + 0xe6, 0x03, 0xea, 0x05, 0x7e, 0x93, 0x87, 0xbb, 0xdb, 0xe9, 0x4b, 0x13, 0xfc, 0x19, 0x98, 0xa7, + 0x81, 0x13, 0xed, 0xe5, 0x7b, 0xd1, 0x5e, 0xa2, 0xc0, 0x21, 0x2f, 0x07, 0xda, 0x66, 0x8a, 0xf5, + 0xa4, 0xef, 0x13, 0x24, 0x08, 0xf0, 0x33, 0xb0, 0x48, 0xb1, 0xdb, 0x21, 0xe1, 0xd4, 0x4b, 0xb7, + 0x56, 0xf6, 0x74, 0xa3, 0xf0, 0xac, 0x19, 0xc7, 0x75, 0x14, 0x42, 0xc7, 0x3b, 0x2e, 0x3e, 0x19, + 0x92, 0x0a, 0xfa, 0x09, 0x58, 0x13, 0x5b, 0xed, 0x51, 0x2e, 0x2c, 0xf0, 0x06, 0x28, 0x75, 0x6d, + 0x57, 0x4c, 0x6a, 0xc1, 0x5c, 0x91, 0xac, 0xd2, 0x89, 0xed, 0xa2, 0x70, 0x5c, 0x98, 0xf1, 0xa5, + 0x88, 0xd9, 0xa4, 0x19, 0x5f, 0xa2, 0x70, 0x5c, 0x7f, 0x00, 0x96, 0xa4, 0xc7, 0x49, 0xa1, 0xd2, + 0x74, 0xa1, 0x52, 0x8e, 0xd0, 0x3f, 0xae, 0x80, 0xcd, 0x86, 0x67, 0xd5, 0x6d, 0x46, 0x03, 0x11, + 0x2f, 0x33, 0xb0, 0x3a, 0x84, 0xbf, 0x85, 0xfc, 0x78, 0x02, 0xe6, 0x99, 0x4f, 0xda, 0x32, 0x2d, + 0xf6, 0xa6, 0xc4, 0x36, 0x67, 0x7e, 0x4d, 0x9f, 0xb4, 0xc7, 0xc7, 0x32, 0xfc, 0x42, 0x42, 0x0d, + 0x3e, 0x03, 0x8b, 0x8c, 0x63, 0x1e, 0x30, 0xb5, 0x24, 0x74, 0xef, 0xbe, 0xa6, 0xae, 0xe0, 0x8e, + 0x77, 0x71, 0xf4, 0x8d, 0xa4, 0xa6, 0xfe, 0x6f, 0x05, 0x5c, 0xcb, 0x61, 0x3d, 0xb2, 0x19, 0x87, + 0xcf, 0x32, 0x11, 0x33, 0x5e, 0x2d, 0x62, 0x21, 0x5b, 0xc4, 0x2b, 0x3e, 0xbc, 0xd1, 0xc8, 0x44, + 0xb4, 0x9a, 0x60, 0xc1, 0xe6, 0xa4, 0x1b, 0xa5, 0xa2, 0xf1, 0x7a, 0xcb, 0x32, 0xd7, 0xa4, 0xf4, + 0xc2, 0x71, 0x28, 0x82, 0x46, 0x5a, 0xfa, 0x37, 0x57, 0x72, 0x97, 0x13, 0x86, 0x13, 0x9e, 0x81, + 0xd5, 0xae, 0xed, 0xee, 0xf7, 0xb0, 0xed, 0xe0, 0x96, 0x3c, 0x3d, 0xd3, 0x92, 0x20, 0xac, 0xb0, + 0xc6, 0xa8, 0xc2, 0x1a, 0xc7, 0x2e, 0x3f, 
0xa5, 0x4d, 0x4e, 0x6d, 0xb7, 0x63, 0x6e, 0x0c, 0x07, + 0xda, 0xea, 0xc9, 0x84, 0x12, 0x4a, 0xe8, 0xc2, 0xdf, 0x83, 0x32, 0x23, 0x0e, 0x69, 0x73, 0x8f, + 0xbe, 0x5e, 0x85, 0x78, 0x84, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, + 0xb1, 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x75, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, + 0x1c, 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xff, 0x9f, 0x07, 0xd7, 0x0b, 0xb3, + 0x0a, 0x7e, 0x06, 0xa0, 0xd7, 0x62, 0x84, 0xf6, 0x88, 0xf5, 0x60, 0x74, 0x07, 0xd9, 0x5e, 0x74, + 0x70, 0x77, 0xe5, 0x06, 0xc1, 0xd3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0x5f, 0x14, 0xb0, 0x66, 0x8d, + 0xdc, 0x10, 0xab, 0xe1, 0x59, 0x51, 0x62, 0x3c, 0xf8, 0x21, 0xf9, 0x6e, 0xd4, 0x27, 0x95, 0x0e, + 0x5d, 0x4e, 0xfb, 0xe6, 0xb6, 0x9c, 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xe1, 0x09, 0x80, 0x56, + 0x2c, 0xc9, 0xe4, 0x9d, 0x26, 0x42, 0xbc, 0x60, 0xde, 0x90, 0x0a, 0xdb, 0x09, 0xbf, 0x11, 0x08, + 0xe5, 0x10, 0xe1, 0xaf, 0xc0, 0x7a, 0x3b, 0xa0, 0x94, 0xb8, 0xfc, 0x21, 0xc1, 0x0e, 0x3f, 0xef, + 0xab, 0xf3, 0x42, 0x6a, 0x47, 0x4a, 0xad, 0x1f, 0x24, 0xac, 0x28, 0x85, 0x0e, 0xf9, 0x16, 0x61, + 0x36, 0x25, 0x56, 0xc4, 0x5f, 0x48, 0xf2, 0xeb, 0x09, 0x2b, 0x4a, 0xa1, 0xe1, 0x7d, 0xb0, 0x4a, + 0x2e, 0x7d, 0xd2, 0x8e, 0x62, 0xba, 0x28, 0xd8, 0x5b, 0x92, 0xbd, 0x7a, 0x38, 0x61, 0x43, 0x09, + 0xe4, 0xae, 0x03, 0x60, 0x36, 0x88, 0x70, 0x03, 0x94, 0x2e, 0x48, 0x7f, 0x74, 0xf3, 0xa0, 0xf0, + 0x27, 0xfc, 0x14, 0x2c, 0xf4, 0xb0, 0x13, 0x10, 0x99, 0xeb, 0xef, 0xbf, 0x5a, 0xae, 0x3f, 0xb1, + 0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d, + 0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 0xb1, 0xcf, 0x6f, 0xa1, 0x66, 0xa3, 0x44, 0xcd, 0xfe, 0x68, + 0x7a, 0xae, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45, + 0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22, + 0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77, + 0x17, 0x5b, 0xd0, 0x04, 0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e, + 0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53, + 0xcf, 0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6, + 0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f, + 0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d, + 0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6, + 0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96, + 0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73, + 0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xdc, 0xc1, 0x95, + 0x87, 0x63, 0x13, 0x9a, 0xc4, 0xc1, 0xdf, 0x82, 0xe5, 0x73, 0xd9, 0xf6, 0x31, 0x75, 0x49, 0x24, + 0xda, 0xad, 0x29, 0x89, 0x96, 0x68, 0x11, 0xcd, 0x8a, 0x94, 0x5f, 0x8e, 0x86, 0x19, 0x1a, 0xab, + 0xc1, 0x9f, 0x80, 0x25, 0xf1, 0x71, 0x5c, 0x57, 0xcb, 0x62, 0x36, 0x57, 0x25, 0x7c, 0xe9, 0xe1, + 0x68, 0x18, 0x45, 0xf6, 0x08, 0x7a, 0xdc, 0x38, 0x50, 0x97, 0xb3, 0xd0, 0xe3, 0xc6, 0x01, 0x8a, + 0xec, 0xf0, 0x19, 0x58, 0x62, 0xe4, 0x91, 0xed, 0x06, 0x97, 0x2a, 0x10, 0x47, 0xee, 0xf6, 0x94, + 0xe9, 0x36, 0x0f, 0x05, 0x32, 0xd5, 0x70, 0x8f, 0xd5, 0xa5, 0x1d, 0x45, 0x92, 0xd0, 0x02, 0xcb, + 0x34, 0x70, 0xf7, 0xd9, 0x53, 0x46, 0xa8, 0xba, 0x92, 0xb9, 0xed, 
0xd3, 0xfa, 0x28, 0xc2, 0xa6, + 0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x2c, 0x0c, 0x2d, 0x00, 0xc4, 0x87, 0xe8, 0xeb, 0xd5, 0x9d, + 0x99, 0x7d, 0x20, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf1, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, + 0xab, 0x02, 0x20, 0x0b, 0x7c, 0xdf, 0x21, 0x5d, 0xe2, 0x72, 0xec, 0x88, 0x51, 0xa6, 0xae, 0x0a, + 0x77, 0xbf, 0x98, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0x6e, 0x06, 0xb2, 0x50, 0x94, 0xe3, 0x33, + 0xdc, 0xb4, 0x33, 0xb9, 0xda, 0xb5, 0x99, 0x9b, 0x96, 0xff, 0x2f, 0x69, 0xbc, 0x69, 0xd2, 0x8e, + 0x22, 0x49, 0xf8, 0x39, 0xd8, 0x89, 0xfe, 0x43, 0x22, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f, + 0x71, 0xd2, 0x55, 0xd7, 0x45, 0x32, 0x55, 0x25, 0x73, 0x07, 0xe5, 0xa2, 0x50, 0x01, 0x1b, 0x76, + 0x81, 0x16, 0x15, 0xa1, 0xf0, 0x84, 0xc6, 0x55, 0xf0, 0x90, 0xb5, 0xb1, 0x33, 0xea, 0x8d, 0xae, + 0x0a, 0x07, 0xef, 0x0d, 0x07, 0x9a, 0x56, 0x9f, 0x0e, 0x45, 0xb3, 0xb4, 0xe0, 0x6f, 0x80, 0x8a, + 0x8b, 0xfc, 0x6c, 0x08, 0x3f, 0x3f, 0x0a, 0x2b, 0x5b, 0xa1, 0x83, 0x42, 0x36, 0xf4, 0xc1, 0x06, + 0x4e, 0xfe, 0x9b, 0x67, 0x6a, 0x45, 0x9c, 0xf5, 0xf7, 0xa7, 0xec, 0x43, 0xea, 0x01, 0xc0, 0x54, + 0x65, 0x18, 0x37, 0x52, 0x06, 0x86, 0x32, 0xea, 0xf0, 0x12, 0x40, 0x9c, 0x7e, 0x7c, 0x60, 0x2a, + 0x9c, 0x79, 0x91, 0x65, 0x5e, 0x2c, 0xc6, 0xa9, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x72, 0x50, + 0xc1, 0xa9, 0xc7, 0x12, 0xa6, 0x5e, 0x13, 0x8e, 0x7f, 0x3a, 0xdb, 0x71, 0xcc, 0x31, 0xaf, 0x4b, + 0xbf, 0x95, 0xb4, 0x85, 0xa1, 0xac, 0x03, 0xf8, 0x08, 0x6c, 0xc9, 0xc1, 0xa7, 0x2e, 0xc3, 0x67, + 0xa4, 0xd9, 0x67, 0x6d, 0xee, 0x30, 0x75, 0x53, 0xd4, 0x6e, 0x75, 0x38, 0xd0, 0xb6, 0xf6, 0x73, + 0xec, 0x28, 0x97, 0x05, 0x3f, 0x05, 0x1b, 0x67, 0x1e, 0x6d, 0xd9, 0x96, 0x45, 0xdc, 0x48, 0x69, + 0x4b, 0x28, 0x6d, 0x85, 0xf1, 0x3f, 0x4a, 0xd9, 0x50, 0x06, 0x0d, 0x19, 0xd8, 0x96, 0xca, 0x0d, + 0xea, 0xb5, 0x4f, 0xbc, 0xc0, 0xe5, 0xe1, 0x75, 0xc1, 0xd4, 0xed, 0xf8, 0x8a, 0xdc, 0xde, 0xcf, + 0x03, 0xbc, 0x1c, 0x68, 0x37, 0x73, 0xae, 0xab, 0x04, 0x08, 0xe5, 0x6b, 0x43, 0x07, 0xac, 0xca, + 0xe7, 0xaf, 0x03, 0x07, 0x33, 0xa6, 0xaa, 0xe2, 0xa8, 0xdf, 0x9b, 0x5e, 0xd8, 0x62, 0x78, 0xfa, + 0xbc, 0x8b, 0xff, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, 0x85, 0x11, + 0xde, 0x4b, 0xbc, 0xa9, 0xe8, 0xa9, 0x37, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x27, 0x95, 0xaf, 0x14, + 0xa0, 0x16, 0xdd, 0x10, 0xf0, 0xe3, 0xc4, 0x04, 0xdf, 0x4d, 0x4d, 0xb0, 0x92, 0xe1, 0xbd, 0x81, + 0xf9, 0x7d, 0xa3, 0x80, 0x77, 0xa6, 0xec, 0x40, 0x5c, 0x90, 0x88, 0x35, 0x89, 0x7a, 0x8c, 0xc3, + 0xa3, 0xac, 0x88, 0x3c, 0x1a, 0x17, 0xa4, 0x1c, 0x0c, 0x2a, 0x64, 0xc3, 0xa7, 0xe0, 0x9a, 0xac, + 0x86, 0x69, 0x9b, 0xe8, 0xdc, 0x97, 0xcd, 0x77, 0x86, 0x03, 0xed, 0x5a, 0x3d, 0x1f, 0x82, 0x8a, + 0xb8, 0xfa, 0x3f, 0x15, 0xb0, 0x93, 0x7f, 0xe5, 0xc3, 0x3b, 0x89, 0x70, 0x6b, 0xa9, 0x70, 0x5f, + 0x4d, 0xb1, 0x64, 0xb0, 0xff, 0x00, 0xd6, 0x65, 0x63, 0x90, 0x7c, 0x22, 0x4c, 0x04, 0x3d, 0x3c, + 0x22, 0x61, 0x4f, 0x2f, 0x25, 0xa2, 0xf4, 0x15, 0xff, 0xc6, 0x93, 0x63, 0x28, 0xa5, 0xa6, 0xff, + 0x4b, 0x01, 0xef, 0xce, 0xbc, 0x6c, 0xa1, 0x99, 0x98, 0xba, 0x91, 0x9a, 0x7a, 0xb5, 0x58, 0xe0, + 0xcd, 0xbc, 0x14, 0x9a, 0x1f, 0x3e, 0x7f, 0x51, 0x9d, 0xfb, 0xf6, 0x45, 0x75, 0xee, 0xbb, 0x17, + 0xd5, 0xb9, 0x3f, 0x0f, 0xab, 0xca, 0xf3, 0x61, 0x55, 0xf9, 0x76, 0x58, 0x55, 0xbe, 0x1b, 0x56, + 0x95, 0xff, 0x0e, 0xab, 0xca, 0xdf, 0xfe, 0x57, 0x9d, 0xfb, 0xdd, 0x92, 0x94, 0xfb, 0x3e, 0x00, + 0x00, 0xff, 0xff, 0x48, 0x23, 0x7b, 0x0e, 0x44, 0x18, 0x00, 0x00, +} + func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -183,21 +741,27 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { } func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -205,21 +769,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -227,29 +797,35 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -257,35 +833,44 @@ func (m *Eviction) Marshal() (dAtA []byte, err error) { } func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) - n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) + } + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -293,33 +878,41 @@ func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -327,23 +920,28 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { } func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -351,23 +949,28 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -375,41 +978,52 @@ func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { } func (m 
*PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -417,37 +1031,46 @@ func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -455,47 +1078,58 @@ func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MinAvailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) 
- n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x1a } if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x12 } - if m.MaxUnavailable != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MinAvailable != nil { + { + size, err := m.MinAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -503,63 +1137,66 @@ func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) + i-- + dAtA[i] = 0x18 if len(m.DisruptedPods) > 0 { keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) for k := range m.DisruptedPods { keysForDisruptedPods = append(keysForDisruptedPods, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - for _, k := range keysForDisruptedPods { + for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- { + v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.DisruptedPods[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForDisruptedPods[iNdEx]) + copy(dAtA[i:], keysForDisruptedPods[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDisruptedPods[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := 
(&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 } } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -567,33 +1204,42 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -601,37 +1247,46 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n13, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil 
{ return nil, err } @@ -639,288 +1294,283 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if 
len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + i-- + if m.ReadOnlyRootFilesystem { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n14, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n15, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n16, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n17, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for 
iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x3a } } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if m.RunAsGroup != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) - n18, err := m.RunAsGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i += n18 } - if len(m.AllowedCSIDrivers) > 0 { - for _, msg := range m.AllowedCSIDrivers { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -928,33 +1578,41 @@ func (m *RunAsGroupStrategyOptions) Marshal() 
(dAtA []byte, err error) { } func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -962,33 +1620,80 @@ func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SELinuxStrategyOptions) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -996,31 +1701,39 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n19, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1028,39 +1741,52 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1069,6 +1795,9 @@ func (m *AllowedCSIDriver) Size() (n int) { } func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Driver) @@ -1077,6 +1806,9 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.PathPrefix) @@ -1086,6 +1818,9 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1098,6 +1833,9 @@ func (m 
*Eviction) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1112,6 +1850,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1120,6 +1861,9 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1128,6 +1872,9 @@ func (m *IDRange) Size() (n int) { } func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1140,6 +1887,9 @@ func (m *PodDisruptionBudget) Size() (n int) { } func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1154,6 +1904,9 @@ func (m *PodDisruptionBudgetList) Size() (n int) { } func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MinAvailable != nil { @@ -1172,6 +1925,9 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { } func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1192,6 +1948,9 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1202,6 +1961,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1216,6 +1978,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1307,10 +2072,17 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1325,6 +2097,9 @@ func (m *RunAsGroupStrategyOptions) Size() (n int) { } func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1338,7 +2113,29 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1351,6 +2148,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1365,14 +2165,7 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1413,8 
+2206,8 @@ func (this *Eviction) String() string { return "nil" } s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, `}`, }, "") return s @@ -1423,9 +2216,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1457,7 +2255,7 @@ func (this *PodDisruptionBudget) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1468,9 +2266,14 @@ func (this *PodDisruptionBudgetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1480,9 +2283,9 @@ func (this *PodDisruptionBudgetSpec) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", 
"k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1496,7 +2299,7 @@ func (this *PodDisruptionBudgetStatus) String() string { keysForDisruptedPods = append(keysForDisruptedPods, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" + mapStringForDisruptedPods := "map[string]v1.Time{" for _, k := range keysForDisruptedPods { mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } @@ -1517,7 +2320,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1527,9 +2330,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1538,6 +2346,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += 
strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -1545,7 +2373,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -1555,13 +2383,14 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -1570,9 +2399,14 @@ func (this *RunAsGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1581,9 +2415,25 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range 
this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, `}`, }, "") return s @@ -1594,7 +2444,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -1603,9 +2453,14 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1633,7 +2488,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1661,7 +2516,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1671,6 +2526,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1685,6 +2543,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1712,7 +2573,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1740,7 +2601,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1750,6 +2611,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-1764,6 +2628,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1791,7 +2658,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1819,7 +2686,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1829,6 +2696,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1848,7 +2718,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1863,6 +2733,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1890,7 +2763,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1918,7 +2791,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1927,6 +2800,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1948,7 +2824,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1957,11 +2833,14 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} + m.DeleteOptions = &v1.DeleteOptions{} } if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1976,6 +2855,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2003,7 +2885,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2031,7 +2913,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2041,6 +2923,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -2060,7 +2945,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2069,6 +2954,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2086,6 +2974,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2113,7 +3004,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2141,7 +3032,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2160,7 +3051,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2174,6 +3065,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2201,7 +3095,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2229,7 +3123,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2248,7 +3142,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2262,6 +3156,9 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2289,7 +3186,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2317,7 +3214,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2326,6 +3223,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2347,7 +3247,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2356,6 +3256,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2377,7 +3280,7 @@ 
func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2386,6 +3289,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2402,6 +3308,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2429,7 +3338,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2457,7 +3366,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2466,6 +3375,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2487,7 +3399,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2496,6 +3408,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2513,6 +3428,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2540,7 +3458,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2568,7 +3486,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2577,11 +3495,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MinAvailable == nil { - m.MinAvailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MinAvailable = &intstr.IntOrString{} } if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2601,7 +3522,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2610,11 +3531,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector 
= &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2634,7 +3558,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2643,11 +3567,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2662,6 +3589,9 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2689,7 +3619,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2717,7 +3647,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2736,7 +3666,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2745,14 +3675,17 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + m.DisruptedPods = make(map[string]v1.Time) } var mapkey string - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + mapvalue := &v1.Time{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -2765,7 +3698,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2782,7 +3715,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2792,6 +3725,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2808,7 +3744,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2817,13 +3753,13 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } 
- mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + mapvalue = &v1.Time{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -2859,7 +3795,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift + m.PodDisruptionsAllowed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2878,7 +3814,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentHealthy |= (int32(b) & 0x7F) << shift + m.CurrentHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2897,7 +3833,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredHealthy |= (int32(b) & 0x7F) << shift + m.DesiredHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2916,7 +3852,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExpectedPods |= (int32(b) & 0x7F) << shift + m.ExpectedPods |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2930,6 +3866,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2957,7 +3896,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2985,7 +3924,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2994,6 +3933,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3015,7 +3957,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3024,6 +3966,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3040,6 +3985,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3067,7 +4015,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3095,7 +4043,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3104,6 +4052,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3125,7 +4076,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -3134,6 +4085,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3151,6 +4105,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3178,7 +4135,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3206,7 +4163,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3226,7 +4183,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3236,6 +4193,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3255,7 +4215,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3265,6 +4225,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3284,7 +4247,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3294,6 +4257,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3313,7 +4279,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3323,6 +4289,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3342,7 +4311,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3362,7 +4331,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3371,6 +4340,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3393,7 +4365,7 @@ func (m 
*PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3413,7 +4385,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3433,7 +4405,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3442,6 +4414,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3463,7 +4438,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3472,6 +4447,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3493,7 +4471,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3502,6 +4480,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3523,7 +4504,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3532,6 +4513,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3553,7 +4537,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3573,7 +4557,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3594,7 +4578,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3615,7 +4599,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3624,6 +4608,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3646,7 +4633,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3655,6 +4642,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3677,7 +4667,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3687,6 +4677,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3706,7 +4699,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3716,6 +4709,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3735,7 +4731,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3745,6 +4741,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3764,7 +4763,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3773,6 +4772,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3797,7 +4799,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3806,6 +4808,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3814,6 +4819,42 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3823,6 +4864,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3850,7 +4894,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3878,7 +4922,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3888,6 +4932,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3907,7 +4954,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3916,6 +4963,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3933,6 +4983,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3960,7 +5013,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3988,7 +5041,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3998,6 +5051,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4017,7 +5073,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4026,6 +5082,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4043,6 +5102,127 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4070,7 +5250,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4098,7 +5278,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4108,6 +5288,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4127,7 +5310,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4136,11 +5319,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4155,6 +5341,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4182,7 +5371,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4210,7 +5399,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4220,6 +5409,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4239,7 +5431,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4248,6 +5440,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4265,6 +5460,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4331,10 +5529,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4363,6 +5564,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4381,125 +5585,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1809 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x5e, 0x7a, 0xff, 0xb4, 0xb3, 0x3f, 0xd6, 0xce, 0xfe, 0x84, 0x5e, 0xd4, 0xa2, 0xc3, 0x00, - 0x85, 0x9b, 0x26, 0x54, 0xbc, 0x76, 0x52, 0xa3, 0x69, 0x8b, 0x2c, 0x57, 0xbb, 0xf6, 0x06, 0xde, - 0xac, 0x3a, 0xb2, 0x83, 0xb6, 0x70, 0x8b, 0x8e, 0xc4, 0x59, 0xed, 0x64, 0x29, 0x92, 0x9d, 0x19, - 0x2a, 0xab, 0xbb, 0x5e, 0xf4, 0xa2, 0x97, 0x7d, 0x81, 0xa0, 0x0f, 0x50, 0xf4, 0xaa, 0x2f, 0xe1, - 0x02, 0x45, 0x91, 0xcb, 0xa0, 0x05, 0x84, 0x5a, 0x45, 0x5f, 0xc2, 0x57, 0x05, 0x47, 0x43, 0x4a, - 0xfc, 0x91, 0x64, 0x1b, 0xb0, 0xef, 0xc8, 0x39, 0xdf, 0xf7, 0x9d, 0xc3, 0x33, 0x67, 0xce, 0x0c, - 0x07, 0xd8, 0x97, 0xf7, 0xb9, 0x45, 0xfd, 0xea, 0x65, 0xd8, 0x24, 0xcc, 0x23, 0x82, 0xf0, 0x6a, - 0x97, 0x78, 0x8e, 0xcf, 0xaa, 0xca, 0x80, 0x03, 0x5a, 0x0d, 0x7c, 0x97, 0xb6, 0x7a, 0xd5, 0xee, - 0x9d, 0x26, 0x11, 0xf8, 0x4e, 0xb5, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x15, 0x30, 0x5f, 0xf8, - 0xf0, 0xc6, 0x10, 0x6a, 0xe1, 0x80, 0x5a, 0x43, 0xa8, 0xa5, 
0xa0, 0x7b, 0x1f, 0xb6, 0xa9, 0xb8, - 0x08, 0x9b, 0x56, 0xcb, 0xef, 0x54, 0xdb, 0x7e, 0xdb, 0xaf, 0x4a, 0x46, 0x33, 0x3c, 0x97, 0x6f, - 0xf2, 0x45, 0x3e, 0x0d, 0x95, 0xf6, 0xcc, 0x31, 0xa7, 0x2d, 0x9f, 0x91, 0x6a, 0x37, 0xe7, 0x6d, - 0xef, 0xde, 0x08, 0xd3, 0xc1, 0xad, 0x0b, 0xea, 0x11, 0xd6, 0xab, 0x06, 0x97, 0xed, 0x68, 0x80, - 0x57, 0x3b, 0x44, 0xe0, 0x22, 0x56, 0x75, 0x12, 0x8b, 0x85, 0x9e, 0xa0, 0x1d, 0x92, 0x23, 0x7c, - 0x32, 0x8b, 0xc0, 0x5b, 0x17, 0xa4, 0x83, 0x73, 0xbc, 0xbb, 0x93, 0x78, 0xa1, 0xa0, 0x6e, 0x95, - 0x7a, 0x82, 0x0b, 0x96, 0x25, 0x99, 0xf7, 0x40, 0xf9, 0xc0, 0x75, 0xfd, 0xaf, 0x89, 0x73, 0xd8, - 0x38, 0xa9, 0x31, 0xda, 0x25, 0x0c, 0xde, 0x02, 0x0b, 0x1e, 0xee, 0x10, 0x5d, 0xbb, 0xa5, 0xdd, - 0x5e, 0xb1, 0xd7, 0x9e, 0xf5, 0x8d, 0xb9, 0x41, 0xdf, 0x58, 0xf8, 0x02, 0x77, 0x08, 0x92, 0x16, - 0xf3, 0x53, 0xb0, 0xa9, 0x58, 0xc7, 0x2e, 0xb9, 0xfa, 0xd2, 0x77, 0xc3, 0x0e, 0x81, 0xdf, 0x07, - 0x4b, 0x8e, 0x14, 0x50, 0xc4, 0x0d, 0x45, 0x5c, 0x1a, 0xca, 0x22, 0x65, 0x35, 0x39, 0xb8, 0xae, - 0xc8, 0x0f, 0x7d, 0x2e, 0xea, 0x58, 0x5c, 0xc0, 0x7d, 0x00, 0x02, 0x2c, 0x2e, 0xea, 0x8c, 0x9c, - 0xd3, 0x2b, 0x45, 0x87, 0x8a, 0x0e, 0xea, 0x89, 0x05, 0x8d, 0xa1, 0xe0, 0x07, 0xa0, 0xc4, 0x08, - 0x76, 0xce, 0x3c, 0xb7, 0xa7, 0x5f, 0xbb, 0xa5, 0xdd, 0x2e, 0xd9, 0x65, 0xc5, 0x28, 0x21, 0x35, - 0x8e, 0x12, 0x84, 0xf9, 0x2f, 0x0d, 0x94, 0x8e, 0xba, 0xb4, 0x25, 0xa8, 0xef, 0xc1, 0xdf, 0x82, - 0x52, 0x34, 0x5b, 0x0e, 0x16, 0x58, 0x3a, 0x5b, 0xdd, 0xff, 0xc8, 0x1a, 0x55, 0x52, 0x92, 0x3c, - 0x2b, 0xb8, 0x6c, 0x47, 0x03, 0xdc, 0x8a, 0xd0, 0x56, 0xf7, 0x8e, 0x75, 0xd6, 0xfc, 0x8a, 0xb4, - 0xc4, 0x29, 0x11, 0x78, 0x14, 0xde, 0x68, 0x0c, 0x25, 0xaa, 0xd0, 0x05, 0xeb, 0x0e, 0x71, 0x89, - 0x20, 0x67, 0x41, 0xe4, 0x91, 0xcb, 0x08, 0x57, 0xf7, 0xef, 0xbe, 0x9c, 0x9b, 0xda, 0x38, 0xd5, - 0xde, 0x1c, 0xf4, 0x8d, 0xf5, 0xd4, 0x10, 0x4a, 0x8b, 0x9b, 0xdf, 0x68, 0x60, 0xf7, 0xb8, 0xf1, - 0x80, 0xf9, 0x61, 0xd0, 0x10, 0xd1, 0xec, 0xb6, 0x7b, 0xca, 0x04, 0x7f, 0x04, 0x16, 0x58, 0xe8, - 0xc6, 0x73, 0xf9, 0x5e, 0x3c, 0x97, 0x28, 0x74, 0xc9, 0x8b, 0xbe, 0xb1, 0x95, 0x61, 0x3d, 0xee, - 0x05, 0x04, 0x49, 0x02, 0xfc, 0x1c, 0x2c, 0x31, 0xec, 0xb5, 0x49, 0x14, 0xfa, 0xfc, 0xed, 0xd5, - 0x7d, 0xd3, 0x9a, 0xb8, 0xd6, 0xac, 0x93, 0x1a, 0x8a, 0xa0, 0xa3, 0x19, 0x97, 0xaf, 0x1c, 0x29, - 0x05, 0xf3, 0x14, 0xac, 0xcb, 0xa9, 0xf6, 0x99, 0x90, 0x16, 0x78, 0x13, 0xcc, 0x77, 0xa8, 0x27, - 0x83, 0x5a, 0xb4, 0x57, 0x15, 0x6b, 0xfe, 0x94, 0x7a, 0x28, 0x1a, 0x97, 0x66, 0x7c, 0x25, 0x73, - 0x36, 0x6e, 0xc6, 0x57, 0x28, 0x1a, 0x37, 0x1f, 0x80, 0x65, 0xe5, 0x71, 0x5c, 0x68, 0x7e, 0xba, - 0xd0, 0x7c, 0x81, 0xd0, 0x5f, 0xae, 0x81, 0xad, 0xba, 0xef, 0xd4, 0x28, 0x67, 0xa1, 0xcc, 0x97, - 0x1d, 0x3a, 0x6d, 0x22, 0xde, 0x42, 0x7d, 0x3c, 0x06, 0x0b, 0x3c, 0x20, 0x2d, 0x55, 0x16, 0xfb, - 0x53, 0x72, 0x5b, 0x10, 0x5f, 0x23, 0x20, 0xad, 0xd1, 0xb2, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x4f, - 0xc1, 0x12, 0x17, 0x58, 0x84, 0x5c, 0x9f, 0x97, 0xba, 0xf7, 0x5e, 0x51, 0x57, 0x72, 0x47, 0xb3, - 0x38, 0x7c, 0x47, 0x4a, 0xd3, 0xfc, 0x87, 0x06, 0xde, 0x29, 0x60, 0x3d, 0xa2, 0x5c, 0xc0, 0xa7, - 0xb9, 0x8c, 0x59, 0x2f, 0x97, 0xb1, 0x88, 0x2d, 0xf3, 0x95, 0x2c, 0xde, 0x78, 0x64, 0x2c, 0x5b, - 0x0d, 0xb0, 0x48, 0x05, 0xe9, 0xc4, 0xa5, 0x68, 0xbd, 0xda, 0x67, 0xd9, 0xeb, 0x4a, 0x7a, 0xf1, - 0x24, 0x12, 0x41, 0x43, 0x2d, 0xf3, 0x9f, 0xd7, 0x0a, 0x3f, 0x27, 0x4a, 0x27, 0x3c, 0x07, 0x6b, - 0x1d, 0xea, 0x1d, 0x74, 0x31, 0x75, 0x71, 0x53, 0xad, 0x9e, 0x69, 0x45, 0x10, 0x75, 0x58, 0x6b, - 0xd8, 0x61, 0xad, 0x13, 0x4f, 0x9c, 0xb1, 0x86, 0x60, 0xd4, 0x6b, 0xdb, 0xe5, 0x41, 
0xdf, 0x58, - 0x3b, 0x1d, 0x53, 0x42, 0x29, 0x5d, 0xf8, 0x6b, 0x50, 0xe2, 0xc4, 0x25, 0x2d, 0xe1, 0xb3, 0x57, - 0xeb, 0x10, 0x8f, 0x70, 0x93, 0xb8, 0x0d, 0x45, 0xb5, 0xd7, 0xa2, 0xbc, 0xc5, 0x6f, 0x28, 0x91, - 0x84, 0x2e, 0xd8, 0xe8, 0xe0, 0xab, 0x27, 0x1e, 0x4e, 0x3e, 0x64, 0xfe, 0x35, 0x3f, 0x04, 0x0e, - 0xfa, 0xc6, 0xc6, 0x69, 0x4a, 0x0b, 0x65, 0xb4, 0xcd, 0xff, 0x2d, 0x80, 0x1b, 0x13, 0xab, 0x0a, - 0x7e, 0x0e, 0xa0, 0xdf, 0xe4, 0x84, 0x75, 0x89, 0xf3, 0x60, 0xb8, 0x07, 0x51, 0x3f, 0x5e, 0xb8, - 0x7b, 0x6a, 0x82, 0xe0, 0x59, 0x0e, 0x81, 0x0a, 0x58, 0xf0, 0x0f, 0x1a, 0x58, 0x77, 0x86, 0x6e, - 0x88, 0x53, 0xf7, 0x9d, 0xb8, 0x30, 0x1e, 0xbc, 0x4e, 0xbd, 0x5b, 0xb5, 0x71, 0xa5, 0x23, 0x4f, - 0xb0, 0x9e, 0xbd, 0xa3, 0x02, 0x5a, 0x4f, 0xd9, 0x50, 0xda, 0x29, 0x3c, 0x05, 0xd0, 0x49, 0x24, - 0xb9, 0xda, 0xd3, 0x64, 0x8a, 0x17, 0xed, 0x9b, 0x4a, 0x61, 0x27, 0xe5, 0x37, 0x06, 0xa1, 0x02, - 0x22, 0xfc, 0x19, 0xd8, 0x68, 0x85, 0x8c, 0x11, 0x4f, 0x3c, 0x24, 0xd8, 0x15, 0x17, 0x3d, 0x7d, - 0x41, 0x4a, 0xed, 0x2a, 0xa9, 0x8d, 0xc3, 0x94, 0x15, 0x65, 0xd0, 0x11, 0xdf, 0x21, 0x9c, 0x32, - 0xe2, 0xc4, 0xfc, 0xc5, 0x34, 0xbf, 0x96, 0xb2, 0xa2, 0x0c, 0x1a, 0xde, 0x07, 0x6b, 0xe4, 0x2a, - 0x20, 0xad, 0x38, 0xa7, 0x4b, 0x92, 0xbd, 0xad, 0xd8, 0x6b, 0x47, 0x63, 0x36, 0x94, 0x42, 0xee, - 0xb9, 0x00, 0xe6, 0x93, 0x08, 0xcb, 0x60, 0xfe, 0x92, 0xf4, 0x86, 0x3b, 0x0f, 0x8a, 0x1e, 0xe1, - 0x67, 0x60, 0xb1, 0x8b, 0xdd, 0x90, 0xa8, 0x5a, 0x7f, 0xff, 0xe5, 0x6a, 0xfd, 0x31, 0xed, 0x10, - 0x34, 0x24, 0xfe, 0xf8, 0xda, 0x7d, 0xcd, 0xfc, 0xbb, 0x06, 0x36, 0xeb, 0xbe, 0xd3, 0x20, 0xad, - 0x90, 0x51, 0xd1, 0xab, 0xcb, 0x79, 0x7e, 0x0b, 0x3d, 0x1b, 0xa5, 0x7a, 0xf6, 0x47, 0xd3, 0x6b, - 0x2d, 0x1d, 0xdd, 0xa4, 0x8e, 0x6d, 0x3e, 0xd3, 0xc0, 0x4e, 0x0e, 0xfd, 0x16, 0x3a, 0xea, 0xcf, - 0xd3, 0x1d, 0xf5, 0x83, 0x57, 0xf9, 0x98, 0x09, 0xfd, 0xf4, 0xdf, 0xe5, 0x82, 0x4f, 0x91, 0xdd, - 0x34, 0x3a, 0xdd, 0x31, 0xda, 0xa5, 0x2e, 0x69, 0x13, 0x47, 0x7e, 0x4c, 0x69, 0xec, 0x74, 0x97, - 0x58, 0xd0, 0x18, 0x0a, 0x72, 0xb0, 0xeb, 0x90, 0x73, 0x1c, 0xba, 0xe2, 0xc0, 0x71, 0x0e, 0x71, - 0x80, 0x9b, 0xd4, 0xa5, 0x82, 0xaa, 0xe3, 0xc8, 0x8a, 0xfd, 0xe9, 0xa0, 0x6f, 0xec, 0xd6, 0x0a, - 0x11, 0x2f, 0xfa, 0xc6, 0xcd, 0xfc, 0x69, 0xde, 0x4a, 0x20, 0x3d, 0x34, 0x41, 0x1a, 0xf6, 0x80, - 0xce, 0xc8, 0xef, 0xc2, 0x68, 0x51, 0xd4, 0x98, 0x1f, 0xa4, 0xdc, 0xce, 0x4b, 0xb7, 0x3f, 0x1d, - 0xf4, 0x0d, 0x1d, 0x4d, 0xc0, 0xcc, 0x76, 0x3c, 0x51, 0x1e, 0x7e, 0x05, 0xb6, 0xb0, 0x3a, 0x87, - 0x8f, 0x7b, 0x5d, 0x90, 0x5e, 0xef, 0x0f, 0xfa, 0xc6, 0xd6, 0x41, 0xde, 0x3c, 0xdb, 0x61, 0x91, - 0x28, 0xac, 0x82, 0xe5, 0xae, 0x3c, 0xb2, 0x73, 0x7d, 0x51, 0xea, 0xef, 0x0c, 0xfa, 0xc6, 0xf2, - 0xf0, 0x14, 0x1f, 0x69, 0x2e, 0x1d, 0x37, 0xe4, 0x41, 0x30, 0x46, 0xc1, 0x8f, 0xc1, 0xea, 0x85, - 0xcf, 0xc5, 0x17, 0x44, 0x7c, 0xed, 0xb3, 0x4b, 0xd9, 0x18, 0x4a, 0xf6, 0x96, 0x9a, 0xc1, 0xd5, - 0x87, 0x23, 0x13, 0x1a, 0xc7, 0xc1, 0x5f, 0x82, 0x95, 0x0b, 0x75, 0xec, 0xe3, 0xfa, 0xb2, 0x2c, - 0xb4, 0xdb, 0x53, 0x0a, 0x2d, 0x75, 0x44, 0xb4, 0x37, 0x95, 0xfc, 0x4a, 0x3c, 0xcc, 0xd1, 0x48, - 0x0d, 0xfe, 0x00, 0x2c, 0xcb, 0x97, 0x93, 0x9a, 0x5e, 0x92, 0xd1, 0x5c, 0x57, 0xf0, 0xe5, 0x87, - 0xc3, 0x61, 0x14, 0xdb, 0x63, 0xe8, 0x49, 0xfd, 0x50, 0x5f, 0xc9, 0x43, 0x4f, 0xea, 0x87, 0x28, - 0xb6, 0xc3, 0xa7, 0x60, 0x99, 0x93, 0x47, 0xd4, 0x0b, 0xaf, 0x74, 0x20, 0x97, 0xdc, 0x9d, 0x29, - 0xe1, 0x36, 0x8e, 0x24, 0x32, 0x73, 0xe0, 0x1e, 0xa9, 0x2b, 0x3b, 0x8a, 0x25, 0xa1, 0x03, 0x56, - 0x58, 0xe8, 0x1d, 0xf0, 0x27, 0x9c, 0x30, 0x7d, 0x35, 0xb7, 0xdb, 0x67, 0xf5, 0x51, 0x8c, 0xcd, - 0x7a, 0x48, 
0x32, 0x93, 0x20, 0xd0, 0x48, 0x18, 0xfe, 0x51, 0x03, 0x90, 0x87, 0x41, 0xe0, 0x92, - 0x0e, 0xf1, 0x04, 0x76, 0xe5, 0xf9, 0x9e, 0xeb, 0x6b, 0xd2, 0xdf, 0x4f, 0xa6, 0x7d, 0x4f, 0x8e, - 0x94, 0x75, 0x9c, 0x6c, 0xd3, 0x79, 0x28, 0x2a, 0xf0, 0x19, 0xa5, 0xf3, 0x9c, 0xcb, 0x67, 0x7d, - 0x7d, 0x66, 0x3a, 0x8b, 0xff, 0x5f, 0x46, 0xe9, 0x54, 0x76, 0x14, 0x4b, 0xc2, 0x2f, 0xc1, 0x6e, - 0xfc, 0x77, 0x87, 0x7c, 0x5f, 0x1c, 0x53, 0x97, 0xf0, 0x1e, 0x17, 0xa4, 0xa3, 0x6f, 0xc8, 0x69, - 0xae, 0x28, 0xe6, 0x2e, 0x2a, 0x44, 0xa1, 0x09, 0x6c, 0xd8, 0x01, 0x46, 0xdc, 0x1e, 0xa2, 0xb5, - 0x93, 0xf4, 0xa7, 0x23, 0xde, 0xc2, 0xee, 0xf0, 0xd4, 0x72, 0x5d, 0x3a, 0x78, 0x6f, 0xd0, 0x37, - 0x8c, 0xda, 0x74, 0x28, 0x9a, 0xa5, 0x05, 0x7f, 0x01, 0x74, 0x3c, 0xc9, 0x4f, 0x59, 0xfa, 0xf9, - 0x5e, 0xd4, 0x73, 0x26, 0x3a, 0x98, 0xc8, 0x86, 0x01, 0x28, 0xe3, 0xf4, 0x7f, 0x36, 0xd7, 0x37, - 0xe5, 0x2a, 0x7c, 0x7f, 0xca, 0x3c, 0x64, 0x7e, 0xcd, 0x6d, 0x5d, 0xa5, 0xb1, 0x9c, 0x31, 0x70, - 0x94, 0x53, 0x87, 0x57, 0x00, 0xe2, 0xec, 0xb5, 0x00, 0xd7, 0xe1, 0xcc, 0x2d, 0x26, 0x77, 0x97, - 0x30, 0x2a, 0xb5, 0x9c, 0x89, 0xa3, 0x02, 0x1f, 0xf0, 0x11, 0xd8, 0x56, 0xa3, 0x4f, 0x3c, 0x8e, - 0xcf, 0x49, 0xa3, 0xc7, 0x5b, 0xc2, 0xe5, 0xfa, 0x96, 0xec, 0x6f, 0xfa, 0xa0, 0x6f, 0x6c, 0x1f, - 0x14, 0xd8, 0x51, 0x21, 0x0b, 0x7e, 0x06, 0xca, 0xe7, 0x3e, 0x6b, 0x52, 0xc7, 0x21, 0x5e, 0xac, - 0xb4, 0x2d, 0x95, 0xb6, 0xa3, 0x4c, 0x1c, 0x67, 0x6c, 0x28, 0x87, 0x86, 0x1c, 0xec, 0x28, 0xe5, - 0x3a, 0xf3, 0x5b, 0xa7, 0x7e, 0xe8, 0x89, 0xa8, 0xa5, 0x72, 0x7d, 0x27, 0xd9, 0x46, 0x76, 0x0e, - 0x8a, 0x00, 0x2f, 0xfa, 0xc6, 0xad, 0x82, 0x96, 0x9e, 0x02, 0xa1, 0x62, 0x6d, 0xe8, 0x00, 0x20, - 0xfb, 0xc0, 0x70, 0xc9, 0xed, 0xce, 0xfc, 0x05, 0x44, 0x09, 0x38, 0xbb, 0xea, 0x36, 0xa2, 0x9d, - 0x79, 0x64, 0x46, 0x63, 0xba, 0x50, 0x80, 0x4d, 0x9c, 0xb9, 0x31, 0xe2, 0xfa, 0x3b, 0x72, 0x8e, - 0x7f, 0x38, 0x7b, 0x8e, 0x13, 0x8e, 0x7d, 0x43, 0x4d, 0xf1, 0x66, 0xd6, 0xc2, 0x51, 0xde, 0x81, - 0xf9, 0x67, 0x0d, 0xdc, 0x98, 0x18, 0x2f, 0xfc, 0x24, 0x75, 0xcb, 0x61, 0x66, 0x6e, 0x39, 0x60, - 0x9e, 0xf8, 0x06, 0x2e, 0x39, 0xbe, 0xd1, 0x80, 0x3e, 0xa9, 0x67, 0xc3, 0x8f, 0x53, 0x01, 0xbe, - 0x9b, 0x09, 0x70, 0x33, 0xc7, 0x7b, 0x03, 0xf1, 0xfd, 0x55, 0x03, 0xbb, 0xc5, 0x7b, 0x16, 0xbc, - 0x9b, 0x8a, 0xce, 0xc8, 0x44, 0x77, 0x3d, 0xc3, 0x52, 0xb1, 0xfd, 0x06, 0x6c, 0xa8, 0x9d, 0x2d, - 0x7d, 0xc7, 0x95, 0x8a, 0x31, 0xaa, 0xdf, 0xe8, 0x50, 0xaa, 0x24, 0xe2, 0xfa, 0x92, 0xbf, 0x93, - 0xe9, 0x31, 0x94, 0x51, 0x33, 0xff, 0xa6, 0x81, 0x77, 0x67, 0xee, 0x49, 0xd0, 0x4e, 0x85, 0x6e, - 0x65, 0x42, 0xaf, 0x4c, 0x16, 0x78, 0x33, 0x57, 0x5d, 0xf6, 0x87, 0xcf, 0x9e, 0x57, 0xe6, 0xbe, - 0x7d, 0x5e, 0x99, 0xfb, 0xee, 0x79, 0x65, 0xee, 0xf7, 0x83, 0x8a, 0xf6, 0x6c, 0x50, 0xd1, 0xbe, - 0x1d, 0x54, 0xb4, 0xef, 0x06, 0x15, 0xed, 0x3f, 0x83, 0x8a, 0xf6, 0xa7, 0xff, 0x56, 0xe6, 0x7e, - 0xb5, 0xac, 0xe4, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf9, 0x85, 0x0c, 0x05, 0x17, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 738c82c6..7be0a7f7 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -186,7 +186,7 @@ message PodDisruptionBudgetStatus { // that will be applied to a pod and container. message PodSecurityPolicy { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -198,7 +198,7 @@ message PodSecurityPolicy { // PodSecurityPolicyList is a list of PodSecurityPolicy objects. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -299,7 +299,8 @@ message PodSecurityPolicySpec { repeated AllowedFlexVolume allowedFlexVolumes = 18; // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional repeated AllowedCSIDriver allowedCSIDrivers = 23; @@ -329,6 +330,12 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. // +optional repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; } // RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. @@ -353,6 +360,21 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. message SELinuxStrategyOptions { // rule is the strategy that will dictate the allowable labels that may be set. diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 74ddd069..b3001bb5 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -134,7 +134,7 @@ type Eviction struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -217,7 +217,8 @@ type PodSecurityPolicySpec struct { // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -244,6 +245,11 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -449,13 +455,32 @@ const ( SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 324116d5..0bbc48f9 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -140,7 +140,7 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -150,7 +150,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -179,10 +179,11 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. 
Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -209,6 +210,16 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 24b078ed..75851e12 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -191,7 +191,7 @@ func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodDisruptionBudget, len(*in)) @@ -305,7 +305,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -411,6 +411,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } return } @@ -466,6 +471,32 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into 
out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 708db327..9bb48fc3 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return 
fileDescriptor_979ffd7b30c07419, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) 
Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") @@ -123,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) +} + +var fileDescriptor_979ffd7b30c07419 = []byte{ + // 807 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, + 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, + 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, + 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, + 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, + 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, + 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, + 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, + 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, + 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, + 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, + 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, + 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, + 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, + 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 
0x0a, + 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, + 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, + 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, + 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, + 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, + 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, + 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, + 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, + 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, + 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, + 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, + 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, + 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, + 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, + 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, + 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, + 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, + 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, + 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, + 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, + 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, + 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, + 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, + 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, + 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, + 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, + 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, + 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, + 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, + 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, + 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, + 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, + 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, + 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, + 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -134,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -164,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -212,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + 
size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -427,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -465,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -549,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -587,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,39 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -662,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -680,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -696,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -710,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -724,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -760,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -774,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -790,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -804,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -818,6 +1258,9 @@ func (m 
*RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -830,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -844,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -860,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -870,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -882,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -894,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, 
f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -905,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -930,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -941,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -953,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return 
"nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -964,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1019,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1047,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1056,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1100,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1128,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1137,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1158,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1167,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1189,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1198,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1217,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1244,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1272,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1281,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1311,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1342,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1358,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1385,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1413,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1422,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,7 +1960,7 @@ func (m 
*ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1452,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1496,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1524,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1533,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1554,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1563,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1580,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1607,7 +2139,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1645,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1664,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1674,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1693,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1703,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1732,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1751,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1761,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1775,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1802,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1839,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1860,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1869,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1886,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1913,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1941,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1971,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1980,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2002,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2011,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2027,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2054,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2082,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2091,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2112,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2121,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2138,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2165,7 +2745,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2193,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2202,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2232,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2249,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2276,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break 
} @@ -2304,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2314,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2333,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2343,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2362,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2372,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2386,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2413,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2441,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2451,6 +3052,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2470,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2480,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2499,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2509,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2538,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -2552,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2618,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2650,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2668,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 807 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, - 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, - 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, - 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, - 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, - 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, - 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, - 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, - 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, - 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, - 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, - 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, - 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, - 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, - 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, - 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, - 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, - 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, - 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, - 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, - 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, - 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, - 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, - 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 
0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, - 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, - 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, - 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, - 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, - 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, - 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, - 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, - 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, - 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, - 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, - 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, - 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, - 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, - 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, - 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, - 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, - 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, - 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, - 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, - 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, - 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, - 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, - 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, - 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, - 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, - 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 07eb321e..095a5e9c 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func 
(in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index e035b331..19336247 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return 
fileDescriptor_b59b0bd5e7cb9590, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) 
Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") @@ -123,10 +394,71 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) +} + +var fileDescriptor_b59b0bd5e7cb9590 = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, + 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, + 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, + 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, + 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, + 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, + 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, + 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, + 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, + 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, + 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, + 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, + 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 
0xf7, 0xe8, 0x23, 0xea, 0x51, + 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, + 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, + 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, + 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, + 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, + 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, + 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, + 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, + 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, + 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, + 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, + 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, + 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, + 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, + 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, + 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, + 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, + 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, + 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, + 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, + 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, + 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, + 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, + 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, + 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, + 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, + 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, + 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, + 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, + 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, + 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, + 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, + 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, + 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, + 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, + 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, +} + 
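From here the regenerated marshalers replace the old forward-writing MarshalTo bodies with MarshalToSizedBuffer, which fills the buffer from the end toward the front, emitting fields in descending field-number order so the finished buffer still reads in ascending order. A minimal sketch of that pattern, using a hypothetical Example message rather than the rbac types:

package main

import "fmt"

type Example struct {
	Name  string   // field 1
	Items []string // field 2
}

func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// encodeVarint writes the varint for v so that it ends just before offset and
// returns the new, smaller offset, mirroring the regenerated encodeVarintGenerated.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func (m *Example) size() int {
	n := 1 + len(m.Name) + sov(uint64(len(m.Name)))
	for _, s := range m.Items {
		n += 1 + len(s) + sov(uint64(len(s)))
	}
	return n
}

// marshalToSizedBuffer writes the highest-numbered field first, walking i
// backward from len(dAtA), and returns how many bytes were written.
func (m *Example) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
		i -= len(m.Items[iNdEx])
		copy(dAtA[i:], m.Items[iNdEx])
		i = encodeVarint(dAtA, i, uint64(len(m.Items[iNdEx])))
		i--
		dAtA[i] = 0x12 // field 2, wire type 2
	}
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i = encodeVarint(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0xa // field 1, wire type 2
	return len(dAtA) - i, nil
}

func main() {
	m := &Example{Name: "demo", Items: []string{"a", "b"}}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	fmt.Printf("% x (n=%d)\n", buf[len(buf)-n:], n) // 0a 04 64 65 6d 6f 12 01 61 12 01 62 (n=12)
}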
func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -134,29 +466,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -164,47 +503,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -212,45 +562,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,37 +619,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +666,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,92 +713,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x2a } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return 
len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -427,37 +781,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -465,45 +828,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,37 +885,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -549,37 +932,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -587,29 +979,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,39 +1017,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -662,6 +1076,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -680,6 +1097,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -696,6 +1116,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -710,6 +1133,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -724,6 +1150,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -760,6 +1189,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -774,6 +1206,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -790,6 +1225,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -804,6 +1242,9 @@ func 
(m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -818,6 +1259,9 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -830,6 +1274,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -844,14 +1291,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -860,8 +1300,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -870,10 +1315,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -882,9 +1332,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, 
"") @@ -894,9 +1349,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -905,9 +1365,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -930,9 +1395,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -941,9 +1411,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + 
strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -953,9 +1428,14 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -964,9 +1444,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1019,7 +1504,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1047,7 +1532,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1056,10 +1541,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,6 +1561,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1100,7 +1591,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1128,7 +1619,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1137,6 +1628,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1158,7 +1652,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1167,6 +1661,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1189,7 +1686,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1198,6 +1695,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1217,6 +1717,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1244,7 +1747,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1272,7 +1775,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1281,6 +1784,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1808,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1311,6 +1817,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1842,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1342,6 +1851,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1358,6 +1870,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1385,7 +1900,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1413,7 +1928,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1422,6 +1937,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,7 +1961,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1452,6 +1970,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,6 +1990,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1496,7 +2020,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1524,7 +2048,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1533,6 +2057,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1554,7 +2081,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1563,6 +2090,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1580,6 +2110,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1607,7 +2140,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,7 +2168,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1645,6 +2178,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1664,7 +2200,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1674,6 +2210,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1693,7 +2232,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-1703,6 +2242,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2264,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1732,6 +2274,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1751,7 +2296,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1761,6 +2306,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1775,6 +2323,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1802,7 +2353,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,7 +2381,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1839,6 +2390,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1860,7 +2414,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1869,6 +2423,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1886,6 +2443,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1913,7 +2473,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1941,7 +2501,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2510,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1971,7 +2534,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1980,6 +2543,9 @@ func (m 
*RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2002,7 +2568,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2011,6 +2577,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2027,6 +2596,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2054,7 +2626,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2082,7 +2654,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2091,6 +2663,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2112,7 +2687,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2121,6 +2696,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2138,6 +2716,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2165,7 +2746,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2193,7 +2774,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2202,6 +2783,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2807,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2232,6 +2816,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2249,6 +2836,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -2276,7 +2866,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2304,7 +2894,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2314,6 +2904,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2333,7 +2926,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2343,6 +2936,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2362,7 +2958,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2372,6 +2968,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2386,6 +2985,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2413,7 +3015,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2441,7 +3043,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2451,6 +3053,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2470,7 +3075,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2480,6 +3085,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2499,7 +3107,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2509,6 +3117,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3139,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-2538,6 +3149,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2552,6 +3166,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2618,10 +3235,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2650,6 +3270,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2668,63 +3291,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, - 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, - 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, - 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, - 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, - 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, - 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, - 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, - 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, - 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, - 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, - 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, - 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, - 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, - 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, - 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, - 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, - 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, - 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, - 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 
0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, - 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, - 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, - 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, - 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, - 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, - 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, - 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, - 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, - 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, - 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, - 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, - 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, - 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, - 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, - 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, - 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, - 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, - 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, - 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, - 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, - 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, - 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, - 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, - 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, - 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, - 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, - 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, - 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, - 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index 97f63331..0358227f 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) 
@@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 904a6e7a..6c80f52f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return 
fileDescriptor_99f6bec96facc83d, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) 
Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") @@ -123,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) +} + +var fileDescriptor_99f6bec96facc83d = []byte{ + // 808 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, + 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, + 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, + 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, + 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, + 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, + 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, + 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, + 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, + 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, + 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, + 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, + 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, + 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, + 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, + 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 
0x4d, 0x5d, 0x6a, 0x47, 0x01, + 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, + 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, + 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, + 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, + 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, + 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, + 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, + 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, + 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, + 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, + 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, + 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, + 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, + 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, + 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, + 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, + 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, + 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, + 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, + 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, + 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, + 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, + 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, + 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, + 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, + 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, + 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, + 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, + 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, + 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, + 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, + 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, + 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, + 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -134,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -164,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -212,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err 
+ { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + 
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -427,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -465,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -549,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -587,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,39 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -662,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -680,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -696,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -710,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -724,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -760,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -774,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -790,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -804,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -818,6 +1258,9 @@ func (m 
*RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -830,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -844,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -860,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -870,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -882,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -894,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, 
f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -905,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -930,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -941,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -953,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return 
"nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -964,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1019,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1047,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1056,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1100,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1128,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1137,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1158,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1167,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1189,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1198,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1217,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1244,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1272,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1281,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1311,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1342,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1358,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1385,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1413,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1422,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,7 +1960,7 @@ func (m 
*ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1452,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1496,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1524,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1533,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1554,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1563,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1580,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1607,7 +2139,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1645,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1664,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1674,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1693,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1703,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1732,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1751,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1761,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1775,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1802,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1839,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1860,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1869,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1886,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1913,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1941,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1971,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1980,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2002,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2011,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2027,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2054,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2082,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2091,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2112,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2121,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2138,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2165,7 +2745,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2193,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2202,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2232,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2249,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2276,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break 
} @@ -2304,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2314,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2333,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2343,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2362,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2372,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2386,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2413,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2441,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2451,6 +3052,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2470,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2480,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2499,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2509,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2538,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -2552,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2618,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2650,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2668,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 808 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, - 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, - 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, - 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, - 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, - 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, - 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, - 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, - 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, - 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, - 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, - 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, - 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, - 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, - 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, - 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, - 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, - 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, - 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, - 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, - 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, - 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, - 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, - 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, - 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 
0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, - 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, - 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, - 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, - 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, - 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, - 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, - 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, - 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, - 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, - 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, - 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, - 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, - 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, - 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, - 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, - 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, - 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, - 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, - 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, - 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, - 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, - 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, - 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, - 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, - 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index c085c90b..7ffe5810 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) 
@@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index 5adf978e..7e9764f1 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -17,26 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. +package v1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) +} + +var fileDescriptor_277b2f43b72fffd5 = []byte{ + // 488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, + 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, + 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, + 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, + 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, + 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, + 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, + 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, + 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 
0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, + 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, + 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, + 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, + 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, + 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, + 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, + 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, + 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, + 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, + 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, + 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, + 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, + 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, + 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, + 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, + 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, + 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, + 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, + 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, + 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, + 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { 
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -158,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -176,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -193,10 +306,11 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -205,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for 
_, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -235,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -263,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -272,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -312,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -332,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -342,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -356,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -383,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -411,7 +572,7 
@@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -441,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -450,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -533,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -565,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -583,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 442 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x3f, 0x8b, 0xd4, 0x40, - 0x18, 0xc6, 0x33, 0x7b, 0x2e, 0xac, 0xb3, 0x2c, 0x68, 0x44, 0x08, 0x5b, 0xcc, 0x85, 0xb3, 0x30, - 0x8d, 0x33, 0xee, 0xa1, 0x22, 0x58, 0x19, 0x0f, 0x44, 0x38, 0x51, 0x52, 0x58, 0x88, 0x85, 0x93, - 0xe4, 0xbd, 0xec, 0xb8, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xd7, 0x59, 0x5b, 0xf9, 0x8d, 0x6c, 0xb7, - 0xbc, 0xf2, 0xaa, 0xc3, 0x8d, 0x5f, 0x44, 0xf2, 0xc7, 0xcb, 0xae, 0xe7, 0xe1, 0x75, 0x99, 0xe7, - 0x7d, 0x7e, 0xcf, 0x3b, 0x79, 0x18, 0xfc, 0x72, 0xf5, 0x5c, 0x53, 0x21, 0xd9, 0xaa, 0x0c, 0x41, - 0xe5, 0x60, 0x40, 0xb3, 0x0a, 0xf2, 0x58, 0x2a, 0xd6, 0x0f, 0x78, 0x21, 0x98, 0x8e, 0x96, 0x10, - 0x97, 0xa9, 0xc8, 0x13, 0x56, 0x2d, 0x58, 0x02, 0x39, 0x28, 0x6e, 0x20, 0xa6, 0x85, 0x92, 0x46, - 0xda, 0x4e, 0xe7, 0xa4, 0xbc, 0x10, 0x74, 0x70, 0xd2, 0x6a, 0x31, 0x7f, 0x94, 0x08, 0xb3, 0x2c, - 0x43, 0x1a, 0xc9, 0x8c, 0x25, 0x32, 0x91, 0xac, 0x05, 0xc2, 0xf2, 0xa4, 0x3d, 0xb5, 0x87, 0xf6, - 0xab, 0x0b, 0x9a, 0x3f, 0x19, 0x56, 0x66, 0x3c, 0x5a, 0x8a, 0x1c, 0xd4, 0x29, 0x2b, 0x56, 0x49, - 0x23, 0x68, 0x96, 0x81, 0xe1, 0xff, 0x58, 0x3f, 0x67, 0xd7, 0x51, 0xaa, 0xcc, 0x8d, 0xc8, 0xe0, - 0x0a, 0xf0, 0xec, 0x7f, 0x40, 0xf3, 0x13, 0x19, 0xff, 0x9b, 0x3b, 0xf8, 0x36, 0xc2, 0xb3, 0xf7, - 0x4a, 0x48, 0x25, 0xcc, 0xe9, 0xab, 0x94, 0x6b, 0x6d, 0x7f, 0xc6, 0x93, 0xe6, 0x56, 0x31, 0x37, - 0xdc, 0x41, 0x2e, 0xf2, 0xa6, 0x87, 0x8f, 0xe9, 0x50, 0xc6, 0x65, 0x38, 0x2d, 0x56, 0x49, 0x23, - 0x68, 0xda, 0xb8, 0x69, 0xb5, 0xa0, 0xef, 0xc2, 0x2f, 0x10, 0x99, 0xb7, 0x60, 
0xb8, 0x6f, 0xaf, - 0x2f, 0xf6, 0xad, 0xfa, 0x62, 0x1f, 0x0f, 0x5a, 0x70, 0x99, 0x6a, 0x3f, 0xc0, 0xe3, 0x8a, 0xa7, - 0x25, 0x38, 0x23, 0x17, 0x79, 0x63, 0x7f, 0xd6, 0x9b, 0xc7, 0x1f, 0x1a, 0x31, 0xe8, 0x66, 0xf6, - 0x0b, 0x3c, 0x4b, 0x52, 0x19, 0xf2, 0xf4, 0x08, 0x4e, 0x78, 0x99, 0x1a, 0x67, 0xcf, 0x45, 0xde, - 0xc4, 0xbf, 0xdf, 0x9b, 0x67, 0xaf, 0xb7, 0x87, 0xc1, 0xae, 0xd7, 0x7e, 0x8a, 0xa7, 0x31, 0xe8, - 0x48, 0x89, 0xc2, 0x08, 0x99, 0x3b, 0xb7, 0x5c, 0xe4, 0xdd, 0xf6, 0xef, 0xf5, 0xe8, 0xf4, 0x68, - 0x18, 0x05, 0xdb, 0xbe, 0x83, 0x1f, 0x08, 0xdf, 0xdd, 0x29, 0xe3, 0x58, 0x68, 0x63, 0x7f, 0xba, - 0x52, 0x08, 0xbd, 0x59, 0x21, 0x0d, 0xdd, 0xd6, 0x71, 0xa7, 0xdf, 0x3c, 0xf9, 0xa3, 0x6c, 0x95, - 0x71, 0x8c, 0xc7, 0xc2, 0x40, 0xa6, 0x9d, 0x91, 0xbb, 0xe7, 0x4d, 0x0f, 0x1f, 0xd2, 0xeb, 0x1e, - 0x1e, 0xdd, 0xb9, 0xd9, 0xd0, 0xda, 0x9b, 0x86, 0x0e, 0xba, 0x10, 0xdf, 0x5b, 0x6f, 0x88, 0x75, - 0xb6, 0x21, 0xd6, 0xf9, 0x86, 0x58, 0x5f, 0x6b, 0x82, 0xd6, 0x35, 0x41, 0x67, 0x35, 0x41, 0xe7, - 0x35, 0x41, 0x3f, 0x6b, 0x82, 0xbe, 0xff, 0x22, 0xd6, 0xc7, 0x51, 0xb5, 0xf8, 0x1d, 0x00, 0x00, - 0xff, 0xff, 0x32, 0xe8, 0x23, 0x88, 0x24, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index 791ba8dc..82f6e0a2 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -32,7 +33,7 @@ option go_package = "v1"; // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -52,12 +53,19 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index d33e0085..087ee10d 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,7 +30,7 @@ import ( type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -49,6 +50,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -57,7 +65,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index 6f3999a9..4cfb9d3e 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -41,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go index 09bfc377..63bfe640 100644 --- a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1 import ( + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(corev1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 0a0d481a..05bff0ff 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -17,26 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. +package v1alpha1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1alpha1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) +} + +var fileDescriptor_f033641dd0b95dce = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, + 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, + 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, + 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, + 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, + 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, + 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, + 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, + 0xc7, 0x77, 0xcf, 0x3c, 0x61, 
0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, + 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, + 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, + 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, + 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, + 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, + 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, + 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, + 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, + 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, + 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, + 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, + 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, + 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, + 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, + 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, + 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, + 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, + 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, + 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, + 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, + 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -158,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -176,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -193,10 +306,11 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -205,9 +319,14 @@ func (this *PriorityClassList) String() string { if this 
== nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -235,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -263,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -272,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -312,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -332,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -342,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -356,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -383,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << 
shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -411,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -441,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -450,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -533,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -565,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -583,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 447 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x33, 0x5d, 0x0b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0xe8, 0x61, 0x36, 0xac, 0x97, 0x5c, - 0x76, 0xc6, 0x2e, 0x2a, 0x82, 0xb7, 0xb8, 0xb0, 0x08, 0x8a, 0x92, 0x83, 0x07, 0xf1, 0xe0, 0x24, - 0x79, 0x37, 0x1d, 0x9b, 0x64, 0xc2, 0xcc, 0x24, 0xb0, 0x37, 0xcf, 0x9e, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0xd3, 0x62, 0xe3, 0x17, 0x91, 0xa4, 0x69, 0xd3, 0x5a, 0xfc, 0x73, 0xcb, 0x3c, 0xef, - 0xef, 0x79, 0xe6, 0xcd, 0xc3, 0xe0, 0x8b, 0xc5, 0x73, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0xe3, 0x69, 0x31, 0xe7, 0x33, 0x96, 0x40, 0x0e, 0x8a, 0x1b, 0x88, - 0x69, 0xa1, 0xa4, 0x91, 0x36, 0x59, 0xf3, 0x94, 0x17, 0x82, 0xf6, 0x3c, 0xdd, 0xf0, 0xd3, 0xd3, - 0x44, 0x98, 0x79, 0x19, 0xd2, 0x48, 0x66, 0x2c, 0x91, 0x89, 0x64, 0xad, 0x2d, 0x2c, 0x2f, 0xdb, - 0x53, 0x7b, 0x68, 0xbf, 0xd6, 0x71, 0xd3, 0x27, 0xfd, 0xf5, 0x19, 0x8f, 0xe6, 0x22, 0x07, 0x75, - 0xc5, 0x8a, 0x45, 0xd2, 0x08, 0x9a, 0x65, 0x60, 0x38, 0xab, 0x0e, 0x96, 0x98, 0xb2, 0x3f, 0xb9, - 0x54, 0x99, 0x1b, 0x91, 0xc1, 0x81, 0xe1, 0xd9, 0xbf, 0x0c, 0xcd, 0xaf, 0x64, 0xfc, 0x77, 0xdf, - 0xc9, 0xd7, 0x01, 0x9e, 0xbc, 0x53, 0x42, 0x2a, 0x61, 0xae, 0x5e, 0xa6, 0x5c, 0x6b, 0xfb, 0x13, - 0x1e, 0x35, 0x5b, 0xc5, 0xdc, 0x70, 0x07, 0xb9, 0xc8, 0x1b, 0x9f, 0x3d, 0xa6, 0x7d, 0x25, 
0xdb, - 0x70, 0x5a, 0x2c, 0x92, 0x46, 0xd0, 0xb4, 0xa1, 0x69, 0x35, 0xa3, 0x6f, 0xc3, 0xcf, 0x10, 0x99, - 0x37, 0x60, 0xb8, 0x6f, 0x2f, 0x6f, 0x8f, 0xad, 0xfa, 0xf6, 0x18, 0xf7, 0x5a, 0xb0, 0x4d, 0xb5, - 0x1f, 0xe1, 0x61, 0xc5, 0xd3, 0x12, 0x9c, 0x81, 0x8b, 0xbc, 0xa1, 0x3f, 0xe9, 0xe0, 0xe1, 0xfb, - 0x46, 0x0c, 0xd6, 0x33, 0xfb, 0x05, 0x9e, 0x24, 0xa9, 0x0c, 0x79, 0x7a, 0x0e, 0x97, 0xbc, 0x4c, - 0x8d, 0x73, 0xe4, 0x22, 0x6f, 0xe4, 0x3f, 0xec, 0xe0, 0xc9, 0xc5, 0xee, 0x30, 0xd8, 0x67, 0xed, - 0xa7, 0x78, 0x1c, 0x83, 0x8e, 0x94, 0x28, 0x8c, 0x90, 0xb9, 0x73, 0xc7, 0x45, 0xde, 0x5d, 0xff, - 0x41, 0x67, 0x1d, 0x9f, 0xf7, 0xa3, 0x60, 0x97, 0x3b, 0xf9, 0x8e, 0xf0, 0xfd, 0xbd, 0x32, 0x5e, - 0x0b, 0x6d, 0xec, 0x8f, 0x07, 0x85, 0xd0, 0xff, 0x2b, 0xa4, 0x71, 0xb7, 0x75, 0xdc, 0xeb, 0x6e, - 0x1e, 0x6d, 0x94, 0x9d, 0x32, 0x02, 0x3c, 0x14, 0x06, 0x32, 0xed, 0x0c, 0xdc, 0x23, 0x6f, 0x7c, - 0x76, 0x4a, 0xff, 0xfe, 0xfc, 0xe8, 0xde, 0x7e, 0x7d, 0x77, 0xaf, 0x9a, 0x8c, 0x60, 0x1d, 0xe5, - 0xd3, 0xe5, 0x8a, 0x58, 0xd7, 0x2b, 0x62, 0xdd, 0xac, 0x88, 0xf5, 0xa5, 0x26, 0x68, 0x59, 0x13, - 0x74, 0x5d, 0x13, 0x74, 0x53, 0x13, 0xf4, 0xa3, 0x26, 0xe8, 0xdb, 0x4f, 0x62, 0x7d, 0x18, 0x6d, - 0x32, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xab, 0x20, 0x12, 0x63, 0x3c, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index bfd85f55..682fb873 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -33,7 +34,7 @@ option go_package = "v1alpha1"; // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -53,12 +54,19 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 6103ea4e..86a2c513 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1alpha1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -30,7 +31,7 @@ import ( type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -50,6 +51,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -58,7 +66,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index 89565012..63a9a353 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -41,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index fe0c8604..03928239 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index ddb28544..198fcd02 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -17,26 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
+package v1beta1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1beta1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) +} + +var fileDescriptor_6cd406dede2d3f42 = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 
0x7a, 0x4b, + 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, + 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, + 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, + 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, + 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, + 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, + 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, + 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, + 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, + 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, + 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, + 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, + 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, + 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, + 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, + 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, + 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, + 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, + 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, + 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, + 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, + 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, + 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, + 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, + 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, + 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, + 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, + 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, + 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { 
- return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -158,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -176,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -193,10 +306,11 @@ func (this *PriorityClass) String() string { 
return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -205,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -235,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -263,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -272,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -312,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -332,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -342,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -356,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -383,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -411,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -441,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -450,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -533,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -565,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -583,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 448 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8b, 0xd3, 0x40, - 0x18, 0xc5, 0x33, 0x5d, 0x8b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0x28, 0x38, 0x1b, 0xd6, 0x4b, 0x0e, - 0xee, 0x8c, 0x5d, 0x54, 0x04, 0x6f, 0x71, 0x51, 0x04, 0x45, 0xcd, 0xc1, 0x83, 0x78, 0x70, 0x92, - 0x7c, 0x9b, 0x8e, 0x4d, 0x32, 0x61, 0x66, 0x12, 0xd8, 0x9b, 0x67, 0x4f, 0xfe, 0x51, 0x1e, 0x7a, - 0xdc, 0xe3, 0x9e, 0x16, 0x1b, 0xff, 0x11, 0x49, 0x1a, 0x37, 0xad, 0x45, 0xdd, 0x5b, 0xe6, 0x7d, - 0xbf, 0xf7, 0xe6, 0xcb, 0x63, 0xf0, 0xf3, 0xc5, 0x13, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0x0b, 0xc1, 0xf0, 0x19, 0x4b, 0x20, 
0x07, 0xc5, 0x0d, 0xc4, 0xb4, - 0x50, 0xd2, 0x48, 0xfb, 0xee, 0x1a, 0xa7, 0xbc, 0x10, 0xb4, 0xc7, 0x69, 0x87, 0x4f, 0x0f, 0x13, - 0x61, 0xe6, 0x65, 0x48, 0x23, 0x99, 0xb1, 0x44, 0x26, 0x92, 0xb5, 0xae, 0xb0, 0x3c, 0x69, 0x4f, - 0xed, 0xa1, 0xfd, 0x5a, 0xa7, 0x4d, 0x1f, 0xf6, 0x97, 0x67, 0x3c, 0x9a, 0x8b, 0x1c, 0xd4, 0x29, - 0x2b, 0x16, 0x49, 0x23, 0x68, 0x96, 0x81, 0xe1, 0xac, 0xda, 0xd9, 0x61, 0xca, 0xfe, 0xe6, 0x52, - 0x65, 0x6e, 0x44, 0x06, 0x3b, 0x86, 0xc7, 0xff, 0x33, 0x34, 0x7f, 0x92, 0xf1, 0x3f, 0x7d, 0x07, - 0x5f, 0x07, 0x78, 0xf2, 0x56, 0x09, 0xa9, 0x84, 0x39, 0x7d, 0x96, 0x72, 0xad, 0xed, 0x4f, 0x78, - 0xd4, 0x6c, 0x15, 0x73, 0xc3, 0x1d, 0xe4, 0x22, 0x6f, 0x7c, 0xf4, 0x80, 0xf6, 0x8d, 0x5c, 0x86, - 0xd3, 0x62, 0x91, 0x34, 0x82, 0xa6, 0x0d, 0x4d, 0xab, 0x19, 0x7d, 0x13, 0x7e, 0x86, 0xc8, 0xbc, - 0x06, 0xc3, 0x7d, 0x7b, 0x79, 0xb1, 0x6f, 0xd5, 0x17, 0xfb, 0xb8, 0xd7, 0x82, 0xcb, 0x54, 0xfb, - 0x1e, 0x1e, 0x56, 0x3c, 0x2d, 0xc1, 0x19, 0xb8, 0xc8, 0x1b, 0xfa, 0x93, 0x0e, 0x1e, 0xbe, 0x6f, - 0xc4, 0x60, 0x3d, 0xb3, 0x9f, 0xe2, 0x49, 0x92, 0xca, 0x90, 0xa7, 0xc7, 0x70, 0xc2, 0xcb, 0xd4, - 0x38, 0x7b, 0x2e, 0xf2, 0x46, 0xfe, 0x9d, 0x0e, 0x9e, 0xbc, 0xd8, 0x1c, 0x06, 0xdb, 0xac, 0xfd, - 0x08, 0x8f, 0x63, 0xd0, 0x91, 0x12, 0x85, 0x11, 0x32, 0x77, 0xae, 0xb9, 0xc8, 0xbb, 0xe1, 0xdf, - 0xee, 0xac, 0xe3, 0xe3, 0x7e, 0x14, 0x6c, 0x72, 0x07, 0xdf, 0x11, 0xbe, 0xb5, 0x55, 0xc6, 0x2b, - 0xa1, 0x8d, 0xfd, 0x71, 0xa7, 0x10, 0x7a, 0xb5, 0x42, 0x1a, 0x77, 0x5b, 0xc7, 0xcd, 0xee, 0xe6, - 0xd1, 0x6f, 0x65, 0xa3, 0x8c, 0x77, 0x78, 0x28, 0x0c, 0x64, 0xda, 0x19, 0xb8, 0x7b, 0xde, 0xf8, - 0xe8, 0x3e, 0xfd, 0xe7, 0xeb, 0xa3, 0x5b, 0xeb, 0xf5, 0xd5, 0xbd, 0x6c, 0x22, 0x82, 0x75, 0x92, - 0x7f, 0xb8, 0x5c, 0x11, 0xeb, 0x6c, 0x45, 0xac, 0xf3, 0x15, 0xb1, 0xbe, 0xd4, 0x04, 0x2d, 0x6b, - 0x82, 0xce, 0x6a, 0x82, 0xce, 0x6b, 0x82, 0x7e, 0xd4, 0x04, 0x7d, 0xfb, 0x49, 0xac, 0x0f, 0xd7, - 0xbb, 0xc8, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x41, 0x74, 0x8a, 0x60, 0x38, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 3f15dc1d..2582891b 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -53,6 +54,13 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index 2f6b3c96..f806ecd4 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1beta1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -50,6 +51,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index e99ed8fc..ffded9df 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go index 6f68e4ac..6e200857 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index c8421310..7469f74a 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - - It has these top-level messages: - PodPreset - PodPresetList - PodPresetSpec -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
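The scheduling/v1beta1 hunks above vendor the new optional preemptionPolicy field on PriorityClass (proto tag 5, only honored by servers with the NonPreemptingPriority feature gate enabled). As a minimal, illustrative sketch that is not part of this patch, a caller of the vendored types could declare a non-preempting class as below; the class name and value are invented for the example:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	schedv1beta1 "k8s.io/api/scheduling/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// "Never" opts pods of this class out of preempting lower-priority pods;
	// leaving the pointer nil keeps the default PreemptLowerPriority behavior.
	never := corev1.PreemptionPolicy("Never")
	pc := schedv1beta1.PriorityClass{
		ObjectMeta:       metav1.ObjectMeta{Name: "batch-low"},
		Value:            1000,
		Description:      "non-preempting priority for batch work",
		PreemptionPolicy: &never,
	}
	fmt.Printf("%s: value=%d policy=%s\n", pc.Name, pc.Value, *pc.PreemptionPolicy)
}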
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{0} +} +func (m *PodPreset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPreset.Merge(m, src) +} +func (m *PodPreset) XXX_Size() int { + return m.Size() +} +func (m *PodPreset) XXX_DiscardUnknown() { + xxx_messageInfo_PodPreset.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPreset proto.InternalMessageInfo + +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{1} +} +func (m *PodPresetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetList.Merge(m, src) +} +func (m *PodPresetList) XXX_Size() int { + return m.Size() +} +func (m *PodPresetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPresetList proto.InternalMessageInfo -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{2} +} +func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetSpec.Merge(m, src) +} +func (m *PodPresetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodPresetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) +} -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo func init() { proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) +} + +var fileDescriptor_48fab0a6ea4b79ce = []byte{ + // 
542 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, + 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, + 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, + 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, + 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, + 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, + 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, + 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, + 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, + 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, + 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, + 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, + 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, + 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, + 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, + 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, + 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, + 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, + 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, + 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, + 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, + 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, + 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, + 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, + 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, + 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, + 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, + 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, + 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, + 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, + 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, + 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, + 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, + 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, +} + func (m *PodPreset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err 
!= nil { return nil, err } @@ -80,33 +187,42 @@ func (m *PodPreset) Marshal() (dAtA []byte, err error) { } func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,37 +230,46 @@ func (m *PodPresetList) Marshal() (dAtA []byte, err error) { } func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,79 +277,99 @@ func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n4, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } } - i 
+= n4 - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PodPreset) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -235,6 +380,9 @@ func (m *PodPreset) Size() (n int) { } func (m *PodPresetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -249,6 +397,9 @@ func (m *PodPresetList) Size() (n int) { } func (m *PodPresetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Selector.Size() @@ -281,14 +432,7 @@ func (m *PodPresetSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -298,7 +442,7 @@ func (this *PodPreset) String() string { return "nil" } s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -308,9 +452,14 @@ func (this *PodPresetList) String() string { if this == 
nil { return "nil" } + repeatedStringForItems := "[]PodPreset{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -319,12 +468,32 @@ func (this *PodPresetSpec) String() string { if this == nil { return "nil" } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_api_core_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_api_core_v1.Volume", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_api_core_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, `}`, }, "") return s @@ -352,7 +521,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -380,7 +549,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -389,6 +558,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -410,7 +582,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-419,6 +591,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -435,6 +610,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -462,7 +640,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -490,7 +668,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -499,6 +677,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -520,7 +701,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -529,6 +710,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -546,6 +730,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -573,7 +760,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -601,7 +788,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -610,6 +797,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -631,7 +821,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -640,10 +830,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) + m.Env = append(m.Env, v11.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -662,7 +855,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -671,10 +864,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, 
k8s_io_api_core_v1.EnvFromSource{}) + m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -693,7 +889,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -702,10 +898,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, k8s_io_api_core_v1.Volume{}) + m.Volumes = append(m.Volumes, v11.Volume{}) if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -724,7 +923,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -733,10 +932,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, k8s_io_api_core_v1.VolumeMount{}) + m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -750,6 +952,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -816,10 +1021,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -848,6 +1056,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -866,45 +1077,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 
0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index 6397a88a..ed6c31a3 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -56,7 +56,7 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodPreset, len(*in)) diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index e4b29311..782f5069 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -17,36 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
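A change repeated throughout the regenerated Unmarshal and skipGenerated functions above is the extra "if postIndex < 0" / "if (iNdEx + skippy) < 0" guard after every index advance. The standalone sketch below, not part of this patch and with an invented hostile length, shows the signed-overflow case those guards reject:

package main

import "fmt"

func main() {
	data := make([]byte, 16)
	iNdEx := 8
	maxInt := int(^uint(0) >> 1) // largest int value on this platform
	msglen := maxInt - 4         // a hostile length decoded from a varint

	postIndex := iNdEx + msglen        // signed addition wraps to a negative value
	fmt.Println(postIndex > len(data)) // false, so a "> length" bound check alone would not catch it
	fmt.Println(postIndex < 0)         // true: the condition the new guards test before slicing dAtA
}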
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" -import strings "strings" -import reflect "reflect" - -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -59,52 +46,319 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{0} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_StorageClass proto.InternalMessageInfo -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{1} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func 
(*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{2} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{3} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{4} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{5} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{6} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{7} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") + 
proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) +} + +var fileDescriptor_3b530c1983504d8d = []byte{ + // 1018 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0xc5, + 0x1b, 0xcf, 0xc6, 0x79, 0x71, 0xc6, 0xc9, 0xff, 0x9c, 0xf9, 0x07, 0x30, 0x2e, 0xec, 0xc8, 0x14, + 0x98, 0x83, 0xdb, 0xbd, 0x84, 0x03, 0x9d, 0x90, 0x40, 0xf2, 0x82, 0x25, 0x4e, 0x8a, 0xef, 0xa2, + 0x49, 0x38, 0x21, 0x44, 0xc1, 0x64, 0xf7, 0x61, 0xb3, 0x67, 0xef, 0xce, 0x32, 0x33, 0x36, 0xa4, + 0xa3, 0xa2, 0x43, 0x82, 0x96, 0x8f, 0x42, 0x49, 0x15, 0xba, 0x13, 0xd5, 0x55, 0x16, 0x59, 0x6a, + 0xbe, 0x40, 0x2a, 0x34, 0xb3, 0x13, 0x7b, 0x63, 0x6f, 0xc0, 0x69, 0xae, 0xf3, 0xf3, 0xf2, 0xfb, + 0x3d, 0xef, 0xb3, 0x46, 0x1f, 0xf5, 0x1f, 0x0a, 0x3b, 0x64, 0x4e, 0x7f, 0x78, 0x02, 0x3c, 0x06, + 0x09, 0xc2, 0x19, 0x41, 0xec, 0x33, 0xee, 0x18, 0x03, 0x4d, 0x42, 0x47, 0x48, 0xc6, 0x69, 0x00, + 0xce, 0x68, 0xcf, 0x09, 0x20, 0x06, 0x4e, 0x25, 0xf8, 0x76, 0xc2, 0x99, 0x64, 0xf8, 0x95, 0xcc, + 0xcd, 0xa6, 0x49, 0x68, 0x1b, 0x37, 0x7b, 0xb4, 0x57, 0xbf, 0x17, 0x84, 0xf2, 0x74, 0x78, 0x62, + 0x7b, 0x2c, 0x72, 0x02, 0x16, 0x30, 0x47, 0x7b, 0x9f, 0x0c, 0xbf, 0xd6, 0x92, 0x16, 0xf4, 0xaf, + 0x8c, 0xa5, 0xde, 0xca, 0x05, 0xf3, 0x18, 0x2f, 0x8a, 0x54, 0x7f, 0x30, 0xf5, 0x89, 0xa8, 0x77, + 0x1a, 0xc6, 0xc0, 0xcf, 0x9c, 0xa4, 0x1f, 0x28, 0x85, 0x70, 0x22, 0x90, 0xb4, 0x08, 0xe5, 0xdc, + 0x84, 0xe2, 0xc3, 0x58, 0x86, 0x11, 0xcc, 0x01, 0xde, 0xff, 0x2f, 0x80, 0xf0, 0x4e, 0x21, 0xa2, + 0xb3, 0xb8, 0xd6, 0x8f, 0x6b, 0x68, 0xf3, 0x28, 0x6b, 0xc0, 0xc7, 0x03, 0x2a, 0x04, 0xfe, 0x0a, + 0x95, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x66, 0xed, 0x5a, 0xed, 0xca, 0xfe, 0x7d, 0x7b, 0xda, 0xac, + 0x09, 0xb7, 0x9d, 0xf4, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x47, 0x7b, 0xf6, 0x93, 0x93, 0x67, + 0xe0, 0xc9, 0x1e, 0x48, 0xea, 0xe2, 0xf3, 0x71, 0x73, 0x29, 0x1d, 0x37, 0xd1, 0x54, 0x47, 0x26, + 0xac, 0xf8, 0x3d, 0x54, 0x49, 0x38, 0x1b, 0x85, 0x22, 0x64, 0x31, 0xf0, 0xda, 0xf2, 0xae, 0xd5, + 0xde, 0x70, 0xff, 0x6f, 0x20, 0x95, 0xc3, 0xa9, 0x89, 0xe4, 0xfd, 0x70, 0x80, 0x50, 0x42, 0x39, + 0x8d, 0x40, 0x02, 0x17, 0xb5, 0xd2, 0x6e, 0xa9, 0x5d, 0xd9, 0x7f, 0xd7, 0x2e, 0x9c, 0xa3, 0x9d, + 0xaf, 0xc8, 0x3e, 0x9c, 0xa0, 0xba, 0xb1, 0xe4, 0x67, 0xd3, 0xec, 0xa6, 0x06, 0x92, 0xa3, 0xc6, + 0x7d, 0xb4, 0xc5, 0xc1, 0x1b, 0xd0, 0x30, 0x3a, 0x64, 0x83, 0xd0, 0x3b, 0xab, 0xad, 0xe8, 0x0c, + 0xbb, 0xe9, 0xb8, 0xb9, 0x45, 0xf2, 0x86, 0xcb, 0x71, 0xf3, 0xfe, 0xfc, 0x06, 0xd8, 0x87, 0xc0, + 0x45, 0x28, 0x24, 0xc4, 0xf2, 0x29, 0x1b, 0x0c, 0x23, 0xb8, 0x86, 0x21, 0xd7, 0xb9, 0xf1, 0x03, + 0xb4, 0x19, 0xb1, 0x61, 0x2c, 0x9f, 0x24, 0x32, 0x64, 0xb1, 0xa8, 0xad, 0xee, 0x96, 0xda, 0x1b, + 0x6e, 0x35, 0x1d, 0x37, 0x37, 0x7b, 0x39, 0x3d, 0xb9, 0xe6, 0x85, 0x0f, 0xd0, 0x0e, 0x1d, 0x0c, + 0xd8, 0xb7, 0x59, 0x80, 0xee, 0x77, 0x09, 0x8d, 0x55, 0x97, 0x6a, 0x6b, 0xbb, 0x56, 0xbb, 0xec, + 0xd6, 0xd2, 0x71, 0x73, 0xa7, 0x53, 0x60, 0x27, 0x85, 0x28, 0xfc, 0x39, 0xda, 0x1e, 0x69, 0x95, + 0x1b, 0xc6, 0x7e, 0x18, 0x07, 0x3d, 0xe6, 0x43, 0x6d, 0x5d, 0x17, 0x7d, 0x37, 0x1d, 0x37, 0xb7, + 0x9f, 0xce, 0x1a, 0x2f, 0x8b, 0x94, 0x64, 0x9e, 0x04, 0x7f, 0x83, 0xb6, 0x75, 0x44, 0xf0, 0x8f, + 0x59, 0xc2, 0x06, 0x2c, 0x08, 0x41, 0xd4, 0xca, 0x7a, 0x74, 0xed, 0xfc, 0xe8, 
0x54, 0xeb, 0xd4, + 0xdc, 0x8c, 0xd7, 0xd9, 0x11, 0x0c, 0xc0, 0x93, 0x8c, 0x1f, 0x03, 0x8f, 0xdc, 0xd7, 0xcd, 0xbc, + 0xb6, 0x3b, 0xb3, 0x54, 0x64, 0x9e, 0xbd, 0xfe, 0x21, 0xba, 0x33, 0x33, 0x70, 0x5c, 0x45, 0xa5, + 0x3e, 0x9c, 0xe9, 0x6d, 0xde, 0x20, 0xea, 0x27, 0xde, 0x41, 0xab, 0x23, 0x3a, 0x18, 0x42, 0xb6, + 0x7c, 0x24, 0x13, 0x3e, 0x58, 0x7e, 0x68, 0xb5, 0x7e, 0xb5, 0x50, 0x35, 0xbf, 0x3d, 0x07, 0xa1, + 0x90, 0xf8, 0xcb, 0xb9, 0x9b, 0xb0, 0x17, 0xbb, 0x09, 0x85, 0xd6, 0x17, 0x51, 0x35, 0x35, 0x94, + 0xaf, 0x34, 0xb9, 0x7b, 0xf8, 0x14, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5b, 0xd6, 0x8d, 0x79, 0x63, + 0x81, 0x9d, 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x23, 0x85, 0x24, 0x19, 0x41, 0xeb, 0x97, 0x65, 0x54, + 0xcd, 0xe6, 0xd2, 0x91, 0x92, 0x7a, 0xa7, 0x11, 0xc4, 0xf2, 0x25, 0x1c, 0x74, 0x0f, 0xad, 0x88, + 0x04, 0x3c, 0xdd, 0xcc, 0xca, 0xfe, 0xdb, 0x37, 0xe4, 0x3f, 0x9b, 0xd8, 0x51, 0x02, 0x9e, 0xbb, + 0x69, 0x88, 0x57, 0x94, 0x44, 0x34, 0x0d, 0xfe, 0x0c, 0xad, 0x09, 0x49, 0xe5, 0x50, 0x1d, 0xb9, + 0x22, 0xbc, 0xb7, 0x28, 0xa1, 0x06, 0xb9, 0xff, 0x33, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xeb, + 0x37, 0x0b, 0xed, 0xcc, 0x42, 0x5e, 0xc2, 0x74, 0x0f, 0xae, 0x4f, 0xf7, 0xcd, 0x05, 0x8b, 0xb9, + 0x61, 0xc2, 0x7f, 0x58, 0xe8, 0xd5, 0xb9, 0xba, 0xd9, 0x90, 0x7b, 0xa0, 0xde, 0x84, 0x64, 0xe6, + 0xe5, 0x79, 0x4c, 0x23, 0xc8, 0xd6, 0x3e, 0x7b, 0x13, 0x0e, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0x9f, + 0xa1, 0x6a, 0x18, 0x0f, 0xc2, 0x18, 0x32, 0xdd, 0xd1, 0x74, 0xbe, 0x85, 0x87, 0x3b, 0xcb, 0xac, + 0x87, 0xbb, 0x93, 0x8e, 0x9b, 0xd5, 0x47, 0x33, 0x2c, 0x64, 0x8e, 0xb7, 0xf5, 0x7b, 0xc1, 0x64, + 0x94, 0x01, 0xbf, 0x83, 0xca, 0x54, 0x6b, 0x80, 0x9b, 0x32, 0x26, 0x9d, 0xee, 0x18, 0x3d, 0x99, + 0x78, 0xe8, 0xbd, 0xd1, 0xad, 0x30, 0x89, 0x2e, 0xbc, 0x37, 0x1a, 0x94, 0xdb, 0x1b, 0x2d, 0x13, + 0x43, 0xa6, 0x92, 0x88, 0x99, 0x9f, 0xf5, 0xb2, 0x74, 0x3d, 0x89, 0xc7, 0x46, 0x4f, 0x26, 0x1e, + 0xad, 0xbf, 0x4b, 0x05, 0x03, 0xd2, 0x0b, 0x98, 0xab, 0xc6, 0xd7, 0xd5, 0x94, 0xe7, 0xaa, 0xf1, + 0x27, 0xd5, 0xf8, 0xf8, 0x67, 0x0b, 0x61, 0x3a, 0xa1, 0xe8, 0x5d, 0x2d, 0x68, 0xb6, 0x45, 0xdd, + 0x5b, 0x9d, 0x84, 0xdd, 0x99, 0xe3, 0xc9, 0xbe, 0x84, 0x75, 0x13, 0x1f, 0xcf, 0x3b, 0x90, 0x82, + 0xe0, 0xd8, 0x47, 0x95, 0x4c, 0xdb, 0xe5, 0x9c, 0x71, 0x73, 0x9e, 0xad, 0x7f, 0xcd, 0x45, 0x7b, + 0xba, 0x0d, 0xf5, 0x65, 0xef, 0x4c, 0xa1, 0x97, 0xe3, 0x66, 0x25, 0x67, 0x27, 0x79, 0x5a, 0x15, + 0xc5, 0x87, 0x69, 0x94, 0x95, 0xdb, 0x45, 0xf9, 0x04, 0x6e, 0x8e, 0x92, 0xa3, 0xad, 0x77, 0xd1, + 0x6b, 0x37, 0xb4, 0xe5, 0x56, 0xdf, 0x8b, 0x1f, 0x2c, 0x94, 0x8f, 0x81, 0x0f, 0xd0, 0x8a, 0xfa, + 0xbb, 0x65, 0x1e, 0x92, 0xbb, 0x8b, 0x3d, 0x24, 0xc7, 0x61, 0x04, 0xd3, 0xa7, 0x50, 0x49, 0x44, + 0xb3, 0xe0, 0xb7, 0xd0, 0x7a, 0x04, 0x42, 0xd0, 0xc0, 0x44, 0x76, 0xef, 0x18, 0xa7, 0xf5, 0x5e, + 0xa6, 0x26, 0x57, 0x76, 0xb7, 0x7d, 0x7e, 0xd1, 0x58, 0x7a, 0x7e, 0xd1, 0x58, 0x7a, 0x71, 0xd1, + 0x58, 0xfa, 0x3e, 0x6d, 0x58, 0xe7, 0x69, 0xc3, 0x7a, 0x9e, 0x36, 0xac, 0x17, 0x69, 0xc3, 0xfa, + 0x33, 0x6d, 0x58, 0x3f, 0xfd, 0xd5, 0x58, 0xfa, 0x62, 0x79, 0xb4, 0xf7, 0x4f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xe2, 0xd4, 0x42, 0x3d, 0x3c, 0x0b, 0x00, 0x00, +} + func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -112,100 +366,108 @@ func (m *StorageClass) Marshal() (dAtA []byte, err error) { } func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], *m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a } if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ + i-- if *m.AllowVolumeExpansion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StorageClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -213,37 +475,46 @@ func (m *StorageClassList) Marshal() (dAtA []byte, err error) { } func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -251,41 +522,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, 
nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -293,37 +575,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -331,23 +622,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -355,33 +664,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], 
m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n7, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -389,67 +707,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n8, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n9, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 + 
i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -457,35 +786,48 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n10, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x12 - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -527,6 +869,9 @@ func (m *StorageClass) Size() (n int) { } func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -541,6 +886,9 @@ func (m *StorageClassList) Size() (n int) { } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -553,6 +901,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -567,16 +918,26 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -589,6 +950,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -612,6 +976,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -622,14 +989,7 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -638,6 +998,11 @@ func (this *StorageClass) String() string { if this == nil { return "nil" } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + 
repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" keysForParameters := make([]string, 0, len(this.Parameters)) for k := range this.Parameters { keysForParameters = append(keysForParameters, k) @@ -649,14 +1014,14 @@ func (this *StorageClass) String() string { } mapStringForParameters += "}" s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, `Parameters:` + mapStringForParameters + `,`, `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, `}`, }, "") return s @@ -665,9 +1030,14 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -677,7 +1047,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -688,9 +1058,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -701,6 +1076,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -734,8 +1110,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -745,7 +1121,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -774,7 +1150,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -802,7 +1178,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -811,6 +1187,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -832,7 +1211,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -842,6 +1221,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -861,7 +1243,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -870,6 +1252,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -890,7 +1275,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -907,7 +1292,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -917,6 +1302,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -933,7 +1321,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -943,6 +1331,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -979,7 +1370,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -989,6 +1380,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1009,7 +1403,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1019,6 +1413,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1038,7 +1435,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1059,7 +1456,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1069,6 +1466,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1089,7 +1489,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1098,10 +1498,13 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1115,6 +1518,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx 
+ skippy) > l { return io.ErrUnexpectedEOF } @@ -1142,7 +1548,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1170,7 +1576,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1179,6 +1585,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1200,7 +1609,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1209,6 +1618,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1226,6 +1638,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1253,7 +1668,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1281,7 +1696,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1290,6 +1705,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1311,7 +1729,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1320,6 +1738,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1341,7 +1762,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1350,6 +1771,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1366,6 +1790,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1393,7 +1820,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1421,7 +1848,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1430,6 +1857,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1451,7 +1881,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1460,6 +1890,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1477,6 +1910,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1504,7 +1940,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1532,7 +1968,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1542,12 +1978,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1557,6 +2032,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1584,7 +2062,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1612,7 +2090,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1622,6 +2100,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -1641,7 +2122,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1650,6 +2131,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1671,7 +2155,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1681,6 +2165,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1695,6 +2182,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2212,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1750,7 +2240,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1770,7 +2260,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1779,6 +2269,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1799,7 +2292,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1816,7 +2309,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1826,6 +2319,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1842,7 +2338,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1852,6 +2348,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1888,7 +2387,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -1897,6 +2396,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1921,7 +2423,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1930,6 +2432,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1949,6 +2454,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1976,7 +2484,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2004,7 +2512,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2013,6 +2521,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2034,7 +2545,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2044,6 +2555,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2058,6 +2572,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2124,10 +2641,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2156,6 +2676,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2174,73 +2697,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 984 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0x45, - 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x97, 0x0c, 0x01, 0x8c, 0x0b, 0x3b, 0x32, 0x05, 0xe6, - 0xe0, 0x76, 0x2f, 0xe1, 0x40, 0x27, 0x24, 0x90, 0xbc, 0x60, 0x09, 0xa4, 0xf8, 0x2e, 0x9a, 0x84, - 0x13, 0x42, 0x14, 
0x4c, 0x76, 0xdf, 0xdb, 0x2c, 0xf6, 0xee, 0x2c, 0x33, 0x63, 0x43, 0x3a, 0x2a, - 0x3a, 0x24, 0x68, 0xf9, 0x29, 0x94, 0x54, 0xa1, 0xbb, 0xf2, 0x2a, 0x8b, 0x2c, 0x35, 0x7f, 0x20, - 0x15, 0x9a, 0xd9, 0x89, 0xbd, 0xb1, 0xd7, 0x9c, 0xd3, 0x5c, 0xe7, 0xf7, 0xe3, 0x79, 0xde, 0xef, - 0x59, 0xa3, 0x4f, 0x7a, 0x0f, 0x85, 0x1d, 0x32, 0xa7, 0x37, 0x38, 0x05, 0x1e, 0x83, 0x04, 0xe1, - 0x0c, 0x21, 0xf6, 0x19, 0x77, 0x8c, 0x81, 0x26, 0xa1, 0x23, 0x24, 0xe3, 0x34, 0x00, 0x67, 0xb8, - 0xef, 0x04, 0x10, 0x03, 0xa7, 0x12, 0x7c, 0x3b, 0xe1, 0x4c, 0x32, 0xfc, 0x5a, 0xe6, 0x66, 0xd3, - 0x24, 0xb4, 0x8d, 0x9b, 0x3d, 0xdc, 0xaf, 0xdd, 0x0b, 0x42, 0x79, 0x36, 0x38, 0xb5, 0x3d, 0x16, - 0x39, 0x01, 0x0b, 0x98, 0xa3, 0xbd, 0x4f, 0x07, 0x4f, 0xb5, 0xa4, 0x05, 0xfd, 0x2b, 0x63, 0xa9, - 0x35, 0x73, 0xc1, 0x3c, 0xc6, 0x8b, 0x22, 0xd5, 0x1e, 0x4c, 0x7c, 0x22, 0xea, 0x9d, 0x85, 0x31, - 0xf0, 0x73, 0x27, 0xe9, 0x05, 0x4a, 0x21, 0x9c, 0x08, 0x24, 0x2d, 0x42, 0x39, 0xf3, 0x50, 0x7c, - 0x10, 0xcb, 0x30, 0x82, 0x19, 0xc0, 0x87, 0x2f, 0x02, 0x08, 0xef, 0x0c, 0x22, 0x3a, 0x8d, 0x6b, - 0xfe, 0xb2, 0x86, 0x36, 0x8f, 0xb3, 0x06, 0x7c, 0xda, 0xa7, 0x42, 0xe0, 0x6f, 0x51, 0x59, 0x25, - 0xe5, 0x53, 0x49, 0xab, 0xd6, 0x9e, 0xd5, 0xaa, 0x1c, 0xdc, 0xb7, 0x27, 0xcd, 0x1a, 0x73, 0xdb, - 0x49, 0x2f, 0x50, 0x0a, 0x61, 0x2b, 0x6f, 0x7b, 0xb8, 0x6f, 0x3f, 0x3e, 0xfd, 0x0e, 0x3c, 0xd9, - 0x05, 0x49, 0x5d, 0x7c, 0x31, 0x6a, 0x2c, 0xa5, 0xa3, 0x06, 0x9a, 0xe8, 0xc8, 0x98, 0x15, 0x7f, - 0x80, 0x2a, 0x09, 0x67, 0xc3, 0x50, 0x84, 0x2c, 0x06, 0x5e, 0x5d, 0xde, 0xb3, 0x5a, 0x1b, 0xee, - 0xab, 0x06, 0x52, 0x39, 0x9a, 0x98, 0x48, 0xde, 0x0f, 0x07, 0x08, 0x25, 0x94, 0xd3, 0x08, 0x24, - 0x70, 0x51, 0x2d, 0xed, 0x95, 0x5a, 0x95, 0x83, 0xf7, 0xed, 0xc2, 0x39, 0xda, 0xf9, 0x8a, 0xec, - 0xa3, 0x31, 0xaa, 0x13, 0x4b, 0x7e, 0x3e, 0xc9, 0x6e, 0x62, 0x20, 0x39, 0x6a, 0xdc, 0x43, 0x5b, - 0x1c, 0xbc, 0x3e, 0x0d, 0xa3, 0x23, 0xd6, 0x0f, 0xbd, 0xf3, 0xea, 0x8a, 0xce, 0xb0, 0x93, 0x8e, - 0x1a, 0x5b, 0x24, 0x6f, 0xb8, 0x1a, 0x35, 0xee, 0xcf, 0x6e, 0x80, 0x7d, 0x04, 0x5c, 0x84, 0x42, - 0x42, 0x2c, 0x9f, 0xb0, 0xfe, 0x20, 0x82, 0x1b, 0x18, 0x72, 0x93, 0x1b, 0x3f, 0x40, 0x9b, 0x11, - 0x1b, 0xc4, 0xf2, 0x71, 0x22, 0x43, 0x16, 0x8b, 0xea, 0xea, 0x5e, 0xa9, 0xb5, 0xe1, 0x6e, 0xa7, - 0xa3, 0xc6, 0x66, 0x37, 0xa7, 0x27, 0x37, 0xbc, 0xf0, 0x21, 0xda, 0xa5, 0xfd, 0x3e, 0xfb, 0x21, - 0x0b, 0xd0, 0xf9, 0x31, 0xa1, 0xb1, 0xea, 0x52, 0x75, 0x6d, 0xcf, 0x6a, 0x95, 0xdd, 0x6a, 0x3a, - 0x6a, 0xec, 0xb6, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0xbf, 0x42, 0x3b, 0x43, 0xad, 0x72, 0xc3, 0xd8, - 0x0f, 0xe3, 0xa0, 0xcb, 0x7c, 0xa8, 0xae, 0xeb, 0xa2, 0xef, 0xa6, 0xa3, 0xc6, 0xce, 0x93, 0x69, - 0xe3, 0x55, 0x91, 0x92, 0xcc, 0x92, 0xe0, 0xef, 0xd1, 0x8e, 0x8e, 0x08, 0xfe, 0x09, 0x4b, 0x58, - 0x9f, 0x05, 0x21, 0x88, 0x6a, 0x59, 0x8f, 0xae, 0x95, 0x1f, 0x9d, 0x6a, 0x9d, 0x9a, 0x9b, 0xf1, - 0x3a, 0x3f, 0x86, 0x3e, 0x78, 0x92, 0xf1, 0x13, 0xe0, 0x91, 0xfb, 0xa6, 0x99, 0xd7, 0x4e, 0x7b, - 0x9a, 0x8a, 0xcc, 0xb2, 0xd7, 0x3e, 0x46, 0x77, 0xa6, 0x06, 0x8e, 0xb7, 0x51, 0xa9, 0x07, 0xe7, - 0x7a, 0x9b, 0x37, 0x88, 0xfa, 0x89, 0x77, 0xd1, 0xea, 0x90, 0xf6, 0x07, 0x90, 0x2d, 0x1f, 0xc9, - 0x84, 0x8f, 0x96, 0x1f, 0x5a, 0xcd, 0x3f, 0x2c, 0xb4, 0x9d, 0xdf, 0x9e, 0xc3, 0x50, 0x48, 0xfc, - 0xcd, 0xcc, 0x4d, 0xd8, 0x8b, 0xdd, 0x84, 0x42, 0xeb, 0x8b, 0xd8, 0x36, 0x35, 0x94, 0xaf, 0x35, - 0xb9, 0x7b, 0xf8, 0x1c, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5d, 0xd6, 0x8d, 0x79, 0x6b, 0x81, 0x9d, - 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x17, 0x0a, 0x49, 0x32, 0x82, 0xe6, 0xef, 0xcb, 0x68, 0x3b, 0x9b, - 0x4b, 0x5b, 0x4a, 0xea, 0x9d, 0x45, 0x10, 
0xcb, 0x97, 0x70, 0xd0, 0x5d, 0xb4, 0x22, 0x12, 0xf0, - 0x74, 0x33, 0x2b, 0x07, 0xef, 0xce, 0xc9, 0x7f, 0x3a, 0xb1, 0xe3, 0x04, 0x3c, 0x77, 0xd3, 0x10, - 0xaf, 0x28, 0x89, 0x68, 0x1a, 0xfc, 0x25, 0x5a, 0x13, 0x92, 0xca, 0x81, 0x3a, 0x72, 0x45, 0x78, - 0x6f, 0x51, 0x42, 0x0d, 0x72, 0x5f, 0x31, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xf3, 0x4f, 0x0b, - 0xed, 0x4e, 0x43, 0x5e, 0xc2, 0x74, 0x0f, 0x6f, 0x4e, 0xf7, 0xed, 0x05, 0x8b, 0x99, 0x33, 0xe1, - 0xa7, 0xe8, 0xf5, 0x99, 0xb2, 0xd9, 0x80, 0x7b, 0xa0, 0x9e, 0x84, 0x64, 0xea, 0xe1, 0x79, 0x44, - 0x23, 0xc8, 0xb6, 0x3e, 0x7b, 0x12, 0x8e, 0x0a, 0xec, 0xa4, 0x10, 0xd5, 0xfc, 0xab, 0xa0, 0x59, - 0x6a, 0x44, 0xf8, 0x3d, 0x54, 0xa6, 0x5a, 0x03, 0xdc, 0x50, 0x8f, 0x8b, 0x6f, 0x1b, 0x3d, 0x19, - 0x7b, 0xe8, 0x51, 0xea, 0xf4, 0xcc, 0x6e, 0x2c, 0x3c, 0x4a, 0x0d, 0xca, 0x8d, 0x52, 0xcb, 0xc4, - 0x90, 0xa9, 0x24, 0x62, 0xe6, 0x67, 0xf5, 0x95, 0x6e, 0x26, 0xf1, 0xc8, 0xe8, 0xc9, 0xd8, 0xa3, - 0xf9, 0x6f, 0xa9, 0xa0, 0x69, 0x7a, 0x27, 0x72, 0xd5, 0xf8, 0xba, 0x9a, 0xf2, 0x4c, 0x35, 0xfe, - 0xb8, 0x1a, 0x1f, 0xff, 0x66, 0x21, 0x4c, 0xc7, 0x14, 0xdd, 0xeb, 0x9d, 0xc9, 0x06, 0xdb, 0xb9, - 0xd5, 0x96, 0xda, 0xed, 0x19, 0x9e, 0xec, 0xe3, 0x54, 0x33, 0xf1, 0xf1, 0xac, 0x03, 0x29, 0x08, - 0x8e, 0x7d, 0x54, 0xc9, 0xb4, 0x1d, 0xce, 0x19, 0x37, 0x17, 0xd3, 0xfc, 0xdf, 0x5c, 0xb4, 0xa7, - 0x5b, 0x57, 0x1f, 0xdb, 0xf6, 0x04, 0x7a, 0x35, 0x6a, 0x54, 0x72, 0x76, 0x92, 0xa7, 0x55, 0x51, - 0x7c, 0x98, 0x44, 0x59, 0xb9, 0x5d, 0x94, 0xcf, 0x60, 0x7e, 0x94, 0x1c, 0x6d, 0xad, 0x83, 0xde, - 0x98, 0xd3, 0x96, 0x5b, 0x3d, 0xe1, 0x3f, 0x5b, 0x28, 0x1f, 0x03, 0x1f, 0xa2, 0x15, 0xf5, 0x0f, - 0xc8, 0xdc, 0xf6, 0xdd, 0xc5, 0x6e, 0xfb, 0x24, 0x8c, 0x60, 0xf2, 0x3a, 0x29, 0x89, 0x68, 0x16, - 0xfc, 0x0e, 0x5a, 0x8f, 0x40, 0x08, 0x1a, 0x98, 0xc8, 0xee, 0x1d, 0xe3, 0xb4, 0xde, 0xcd, 0xd4, - 0xe4, 0xda, 0xee, 0xb6, 0x2e, 0x2e, 0xeb, 0x4b, 0xcf, 0x2e, 0xeb, 0x4b, 0xcf, 0x2f, 0xeb, 0x4b, - 0x3f, 0xa5, 0x75, 0xeb, 0x22, 0xad, 0x5b, 0xcf, 0xd2, 0xba, 0xf5, 0x3c, 0xad, 0x5b, 0x7f, 0xa7, - 0x75, 0xeb, 0xd7, 0x7f, 0xea, 0x4b, 0x5f, 0x2f, 0x0f, 0xf7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, - 0x85, 0x2a, 0x88, 0xc0, 0xcf, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 7ac6cb2d..4087b7e5 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -36,7 +36,7 @@ option go_package = "v1"; // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -80,7 +80,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -94,7 +94,7 @@ message StorageClassList { // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -112,7 +112,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -128,6 +128,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index bd60e102..e42ee5e9 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -166,7 +166,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index e31dd7f7..78ff19c2 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. 
Defaults to Delete.", @@ -45,7 +45,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -55,7 +55,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -66,7 +66,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 3157ec67..eb8626e6 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -89,7 +89,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -150,7 +150,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -187,6 +187,11 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(corev1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 0511ccab..42324352 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -17,32 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto - - It has these top-level messages: - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -55,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{0} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{1} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} -func (m 
*VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{2} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{3} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{4} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func 
(*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{5} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo func init() { proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") @@ -85,12 +219,69 @@ func init() { proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) +} + +var fileDescriptor_10f856db1e670dc4 = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, + 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, + 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, + 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, + 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 0x82, 0x9d, 0xfe, 0x36, + 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, + 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, + 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, + 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, + 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, + 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, + 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, + 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, + 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, + 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, + 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, + 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, + 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, + 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 
0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, + 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, + 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, + 0xc3, 0x77, 0x0b, 0x62, 0xa1, 0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, + 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 0xa1, 0x55, 0xde, 0xda, 0xbc, + 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, + 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, + 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, + 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, + 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, + 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, + 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, + 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, + 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, + 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, + 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, + 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, + 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, + 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, + 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, + 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, + 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, + 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, + 0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, + 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, + 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, + 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, +} + func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -98,41 +289,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != 
nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -140,37 +342,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -178,23 +389,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -202,33 +431,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n5, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -236,67 +474,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- 
dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n6, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n7, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -304,35 +553,48 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n8, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x12 - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -345,6 +607,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -359,16 +624,26 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -381,6 +656,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -404,6 +682,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -414,14 +695,7 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -431,7 +705,7 @@ func (this 
*VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -442,9 +716,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -455,6 +734,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -488,8 +768,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -499,7 +779,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -528,7 +808,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -556,7 +836,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -565,6 +845,9 @@ func (m *VolumeAttachment) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -586,7 +869,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -595,6 +878,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -616,7 +902,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -625,6 +911,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -641,6 +930,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -668,7 +960,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -696,7 +988,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -705,6 +997,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -726,7 +1021,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -735,6 +1030,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -752,6 +1050,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -779,7 +1080,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -807,7 +1108,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -817,12 +1118,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -832,6 +1172,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -859,7 +1202,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -887,7 +1230,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -897,6 +1240,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -916,7 +1262,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -925,6 +1271,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -946,7 +1295,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -956,6 +1305,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -970,6 +1322,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -997,7 +1352,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1025,7 +1380,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1045,7 +1400,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1054,6 +1409,9 @@ func (m 
*VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1074,7 +1432,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1091,7 +1449,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1101,6 +1459,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1117,7 +1478,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1127,6 +1488,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1163,7 +1527,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1172,6 +1536,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1196,7 +1563,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1205,6 +1572,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1224,6 +1594,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1251,7 +1624,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,7 +1652,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1288,6 +1661,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1309,7 +1685,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1319,6 
+1695,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,6 +1712,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1399,10 +1781,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1431,6 +1816,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1449,55 +1837,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 704 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, - 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0xcd, 0xf3, 0x52, 0xad, 0xa2, 0xe7, 0x89, 0x82, 0xe4, 0x54, - 0x39, 0x15, 0x44, 0xd7, 0xa4, 0x20, 0x54, 0x71, 0x8b, 0xd5, 0x1e, 0x10, 0x6d, 0x41, 0x5b, 0xc4, - 0x01, 0x38, 0xb0, 0xb1, 0xa7, 0x8e, 0x9b, 0xfa, 0x45, 0xbb, 0xeb, 0x48, 0xbd, 0x71, 0xe2, 0xcc, - 0x8d, 0x6f, 0xc0, 0x67, 0xc9, 0x8d, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x16, 0x5c, 0x40, 0x5e, 0x6f, - 0x5e, 0x68, 0x52, 0x68, 0x7b, 0xf3, 0xcc, 0xce, 0xfc, 0x66, 0xe6, 0xbf, 0xb3, 0x46, 0x3b, 0xfd, - 0x6d, 0x41, 0xfc, 0xc8, 0xea, 0x27, 0x5d, 0xe0, 0x21, 0x48, 0x10, 0xd6, 0x00, 0x42, 0x37, 0xe2, - 0x96, 0x3e, 0x60, 0xb1, 0x6f, 0x09, 0x19, 0x71, 0xe6, 0x81, 0x35, 0x68, 0xb3, 0x93, 0xb8, 0xc7, - 0xda, 0x96, 0x07, 0x21, 0x70, 0x26, 0xc1, 0x25, 0x31, 0x8f, 0x64, 0x84, 0xef, 0xe4, 0xc1, 0x84, - 0xc5, 0x3e, 0xd1, 0xc1, 0x64, 0x1c, 0xdc, 0xd8, 0xf4, 0x7c, 0xd9, 0x4b, 0xba, 0xc4, 0x89, 0x02, - 0xcb, 0x8b, 0xbc, 0xc8, 0x52, 0x39, 0xdd, 0xe4, 0x48, 0x59, 0xca, 0x50, 0x5f, 0x39, 0xab, 0xf1, - 0x68, 0x5a, 0x38, 0x60, 0x4e, 0xcf, 0x0f, 0x81, 0x9f, 0x5a, 0x71, 0xdf, 0xcb, 0x1c, 0xc2, 0x0a, - 0x40, 0x32, 0x6b, 0x30, 0xd7, 0x41, 0xc3, 0xba, 0x2a, 0x8b, 0x27, 0xa1, 0xf4, 0x03, 0x98, 0x4b, - 0x78, 0xfc, 0xa7, 0x04, 0xe1, 0xf4, 0x20, 0x60, 0x97, 0xf3, 0x5a, 0x9f, 0x8b, 0x68, 0xed, 0x55, - 0x74, 0x92, 0x04, 0xd0, 0x91, 0x92, 0x39, 0xbd, 0x00, 0x42, 0x89, 0xdf, 0xa1, 0x4a, 0xd6, 0x98, - 0xcb, 0x24, 0xab, 0x1b, 0xeb, 0xc6, 0x46, 0x75, 0xeb, 0x01, 0x99, 0x4a, 0x32, 0xe1, 0x93, 0xb8, - 0xef, 0x65, 0x0e, 0x41, 0xb2, 0x68, 0x32, 0x68, 0x93, 0xe7, 0xdd, 0x63, 0x70, 0xe4, 0x3e, 0x48, - 0x66, 0xe3, 0xe1, 0xa8, 0x59, 0x48, 0x47, 0x4d, 0x34, 0xf5, 0xd1, 0x09, 0x15, 0x1f, 0xa2, 0xb2, - 0x88, 0xc1, 0xa9, 0x17, 0x15, 0xbd, 0x4d, 0x7e, 0x23, 0x38, 0xb9, 0xdc, 0xde, 0x61, 0x0c, 0x8e, - 0xfd, 0x97, 0xc6, 0x97, 0x33, 0x8b, 0x2a, 0x18, 0x7e, 0x83, 0x96, 0x85, 0x64, 0x32, 0x11, 0xf5, - 0x92, 0xc2, 0x3e, 0xbc, 0x19, 0x56, 0xa5, 0xda, 0xff, 0x68, 0xf0, 0x72, 0x6e, 0x53, 0x8d, 0x6c, - 0x0d, 0x0d, 0x54, 0xbb, 0x9c, 0xb2, 0xe7, 0x0b, 0x89, 0xdf, 0xce, 0x89, 0x45, 0xae, 0x27, 0x56, - 0x96, 0xad, 0xa4, 0x5a, 
0xd3, 0x25, 0x2b, 0x63, 0xcf, 0x8c, 0x50, 0x14, 0x2d, 0xf9, 0x12, 0x02, - 0x51, 0x2f, 0xae, 0x97, 0x36, 0xaa, 0x5b, 0x9b, 0x37, 0x1a, 0xc9, 0xfe, 0x5b, 0x93, 0x97, 0x9e, - 0x66, 0x0c, 0x9a, 0xa3, 0x5a, 0x47, 0xe8, 0xbf, 0xb9, 0xe1, 0xa3, 0x84, 0x3b, 0x80, 0xf7, 0x50, - 0x2d, 0x06, 0x2e, 0x7c, 0x21, 0x21, 0x94, 0x79, 0xcc, 0x01, 0x0b, 0x40, 0xcd, 0xb5, 0x6a, 0xd7, - 0xd3, 0x51, 0xb3, 0xf6, 0x62, 0xc1, 0x39, 0x5d, 0x98, 0xd5, 0xfa, 0xb2, 0x40, 0xb2, 0xec, 0xba, - 0xf0, 0x7d, 0x54, 0x61, 0xca, 0x03, 0x5c, 0xa3, 0x27, 0x12, 0x74, 0xb4, 0x9f, 0x4e, 0x22, 0xd4, - 0xb5, 0xaa, 0xf6, 0xf4, 0xb6, 0xdc, 0xf0, 0x5a, 0x55, 0xea, 0xcc, 0xb5, 0x2a, 0x9b, 0x6a, 0x64, - 0xd6, 0x4a, 0x18, 0xb9, 0xf9, 0x94, 0xa5, 0x5f, 0x5b, 0x39, 0xd0, 0x7e, 0x3a, 0x89, 0x68, 0xfd, - 0x28, 0x2d, 0x90, 0x4e, 0xed, 0xc7, 0xcc, 0x4c, 0xae, 0x9a, 0xa9, 0x32, 0x37, 0x93, 0x3b, 0x99, - 0xc9, 0xc5, 0x9f, 0x0c, 0x84, 0xd9, 0x04, 0xb1, 0x3f, 0xde, 0x9f, 0xfc, 0x92, 0x9f, 0xdd, 0x62, - 0x6f, 0x49, 0x67, 0x8e, 0xb6, 0x1b, 0x4a, 0x7e, 0x6a, 0x37, 0x74, 0x17, 0x78, 0x3e, 0x80, 0x2e, - 0x68, 0x01, 0x1f, 0xa3, 0x6a, 0xee, 0xdd, 0xe5, 0x3c, 0xe2, 0xfa, 0x25, 0x6d, 0x5c, 0xa3, 0x23, - 0x15, 0x6f, 0x9b, 0xe9, 0xa8, 0x59, 0xed, 0x4c, 0x01, 0xdf, 0x47, 0xcd, 0xea, 0xcc, 0x39, 0x9d, - 0x85, 0x67, 0xb5, 0x5c, 0x98, 0xd6, 0x2a, 0xdf, 0xa6, 0xd6, 0x0e, 0x5c, 0x5d, 0x6b, 0x06, 0xde, - 0xd8, 0x45, 0xff, 0x5f, 0x21, 0x11, 0x5e, 0x43, 0xa5, 0x3e, 0x9c, 0xe6, 0x9b, 0x48, 0xb3, 0x4f, - 0x5c, 0x43, 0x4b, 0x03, 0x76, 0x92, 0xe4, 0x1b, 0xb7, 0x4a, 0x73, 0xe3, 0x49, 0x71, 0xdb, 0x68, - 0x7d, 0x30, 0xd0, 0x6c, 0x0d, 0xbc, 0x87, 0xca, 0xd9, 0xef, 0x55, 0xbf, 0xfc, 0x7b, 0xd7, 0x7b, - 0xf9, 0x2f, 0xfd, 0x00, 0xa6, 0x7f, 0xb0, 0xcc, 0xa2, 0x8a, 0x82, 0xef, 0xa2, 0x95, 0x00, 0x84, - 0x60, 0x9e, 0xae, 0x6c, 0xff, 0xab, 0x83, 0x56, 0xf6, 0x73, 0x37, 0x1d, 0x9f, 0xdb, 0x64, 0x78, - 0x61, 0x16, 0xce, 0x2e, 0xcc, 0xc2, 0xf9, 0x85, 0x59, 0x78, 0x9f, 0x9a, 0xc6, 0x30, 0x35, 0x8d, - 0xb3, 0xd4, 0x34, 0xce, 0x53, 0xd3, 0xf8, 0x9a, 0x9a, 0xc6, 0xc7, 0x6f, 0x66, 0xe1, 0x75, 0x65, - 0x2c, 0xdc, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0xba, 0xdb, 0x12, 0x1a, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index fdc4ad25..76019639 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.storage.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -34,7 +35,7 @@ option go_package = "v1alpha1"; // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -52,7 +53,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. 
message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -68,6 +69,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 964bb5f7..39408857 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -16,7 +16,10 @@ limitations under the License. package v1alpha1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -30,7 +33,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -51,7 +54,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -81,7 +84,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. 
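
Editor's note (illustrative only, not part of the vendored patch): the hunks above add the alpha-level inlineVolumeSpec field to VolumeAttachmentSource for the CSIMigration feature. The following minimal Go sketch shows how a caller might populate the new field versus the pre-existing persistentVolumeName path, assuming the vendored k8s.io/api packages; the driver name, volume handle, node name, and object name are hypothetical placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Existing path: reference an already-provisioned PersistentVolume by name.
	pvName := "pv-example" // hypothetical PV name
	byName := storagev1alpha1.VolumeAttachmentSource{PersistentVolumeName: &pvName}

	// New path (CSIMigration, alpha): carry a translated PersistentVolumeSpec inline,
	// so the attach request can describe a pod's inline volume without a PV object.
	inline := storagev1alpha1.VolumeAttachmentSource{
		InlineVolumeSpec: &corev1.PersistentVolumeSpec{
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver:       "csi.example.com", // hypothetical CSI driver name
					VolumeHandle: "vol-0123456789",  // hypothetical volume handle
				},
			},
		},
	}

	va := storagev1alpha1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "example-attachment"},
		Spec: storagev1alpha1.VolumeAttachmentSpec{
			Attacher: "csi.example.com",
			NodeName: "node-1",
			Source:   inline, // or byName for the pre-existing behavior
		},
	}
	fmt.Println(va.Spec.Source.InlineVolumeSpec != nil, byName.PersistentVolumeName != nil)
}

Exactly one of the two source fields would be set on a given VolumeAttachment; servers that have not enabled CSIMigration ignore inlineVolumeSpec, as the field documentation in the hunks above states.
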
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 3701b086..2e821616 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -40,7 +40,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index e27c6ff3..3debf9df 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -56,7 +57,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -93,6 +94,11 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index 0cde6ec0..cd35af34 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -17,43 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto - - It has these top-level messages: - CSIDriver - CSIDriverList - CSIDriverSpec - CSINode - CSINodeDriver - CSINodeList - CSINodeSpec - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -66,65 +46,453 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CSIDriver) Reset() { *m = CSIDriver{} } -func (*CSIDriver) ProtoMessage() {} -func (*CSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CSIDriver) Reset() { *m = CSIDriver{} } +func (*CSIDriver) ProtoMessage() {} +func (*CSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{0} +} +func (m *CSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriver.Merge(m, src) +} +func (m *CSIDriver) XXX_Size() int { + return m.Size() +} +func (m *CSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriver proto.InternalMessageInfo + +func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } +func (*CSIDriverList) ProtoMessage() {} +func (*CSIDriverList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{1} +} +func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriverList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverList.Merge(m, src) +} +func (m *CSIDriverList) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverList) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo + +func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } +func (*CSIDriverSpec) ProtoMessage() {} +func (*CSIDriverSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{2} +} +func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*CSIDriverSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverSpec.Merge(m, src) +} +func (m *CSIDriverSpec) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo + +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{3} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINode proto.InternalMessageInfo + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{4} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{5} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeList proto.InternalMessageInfo + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{6} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo + +func (m *StorageClass) Reset() { *m 
= StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{7} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} -func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } -func (*CSIDriverList) ProtoMessage() {} -func (*CSIDriverList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_StorageClass proto.InternalMessageInfo -func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } -func (*CSIDriverSpec) ProtoMessage() {} -func (*CSIDriverSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{8} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} -func (m *CSINode) Reset() { *m = CSINode{} } -func (*CSINode) ProtoMessage() {} -func (*CSINode) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo -func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } -func (*CSINodeDriver) ProtoMessage() {} -func (*CSINodeDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *CSINodeList) Reset() { *m = CSINodeList{} } -func (*CSINodeList) ProtoMessage() {} -func (*CSINodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } -func (*CSINodeSpec) ProtoMessage() {} -func 
(*CSINodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{10} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{11} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{12} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{13} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{14} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } +func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{15} +} +func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + 
xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo func init() { proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") @@ -135,18 +503,114 @@ func init() { proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) } + +var fileDescriptor_7d2980599fd0de80 = []byte{ + // 1344 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0xdb, 0x46, + 0x1b, 0x37, 0x2d, 0x7f, 0x9e, 0xec, 0x44, 0xbe, 0x18, 0xef, 0xab, 0x57, 0x83, 0x64, 0xe8, 0x45, + 0x1b, 0x27, 0x48, 0xc8, 0x24, 0x48, 0x83, 0x20, 0x40, 0x07, 0xd3, 0x31, 0x50, 0x25, 0x96, 0xe3, + 0x9e, 0x8d, 0xa0, 0x08, 0x3a, 0xf4, 0x44, 0x3e, 0x91, 0x19, 0x93, 0x3c, 0x86, 0x3c, 0xa9, 0xd5, + 0xd6, 0xa9, 0x73, 0xd1, 0xa1, 0x7f, 0x41, 0xff, 0x85, 0x16, 0x68, 0x97, 0x8e, 0xcd, 0x54, 0x04, + 0x9d, 0x32, 0x09, 0x0d, 0xbb, 0x76, 0xeb, 0x66, 0x74, 0x28, 0xee, 0x78, 0x12, 0x29, 0x89, 0x8a, + 0xed, 0x0e, 0xde, 0x78, 0xcf, 0xc7, 0xef, 0xf9, 0x7e, 0xee, 0x88, 0xb6, 0x8f, 0xef, 0x47, 0xba, + 0xc3, 0x8c, 0xe3, 0x4e, 0x0b, 0x42, 0x1f, 0x38, 0x44, 0x46, 0x17, 0x7c, 0x9b, 0x85, 0x86, 0x62, + 0xd0, 0xc0, 0x31, 0x22, 0xce, 0x42, 0xda, 0x06, 0xa3, 0x7b, 0xbb, 0x05, 0x9c, 0xde, 0x36, 0xda, + 0xe0, 0x43, 0x48, 0x39, 0xd8, 0x7a, 0x10, 0x32, 0xce, 0x70, 0x25, 0x91, 0xd5, 0x69, 0xe0, 0xe8, + 0x4a, 0x56, 0x57, 0xb2, 0x95, 0x9b, 0x6d, 0x87, 0x1f, 0x75, 0x5a, 0xba, 0xc5, 0x3c, 0xa3, 0xcd, + 0xda, 0xcc, 0x90, 0x2a, 0xad, 0xce, 0x73, 0x79, 0x92, 0x07, 0xf9, 0x95, 0x40, 0x55, 0xea, 0x19, + 0xb3, 0x16, 0x0b, 0x85, 0xcd, 0x71, 0x73, 0x95, 0xbb, 0xa9, 0x8c, 0x47, 0xad, 0x23, 0xc7, 0x87, + 0xb0, 0x67, 0x04, 0xc7, 0x6d, 0x41, 0x88, 0x0c, 0x0f, 0x38, 0xcd, 0xd3, 0x32, 0xa6, 0x69, 0x85, + 0x1d, 0x9f, 0x3b, 0x1e, 0x4c, 0x28, 0xdc, 0x3b, 0x4d, 0x21, 0xb2, 0x8e, 0xc0, 0xa3, 0xe3, 0x7a, + 0xf5, 0x9f, 0x34, 0xb4, 0xbc, 0x7d, 0xd0, 0x78, 0x18, 0x3a, 0x5d, 0x08, 0xf1, 0x67, 0x68, 0x49, + 0x78, 0x64, 0x53, 0x4e, 0xcb, 0xda, 0x86, 0xb6, 0x59, 0xbc, 0x73, 0x4b, 0x4f, 0xd3, 0x35, 0x04, + 0xd6, 0x83, 0xe3, 0xb6, 0x20, 0x44, 0xba, 0x90, 0xd6, 0xbb, 0xb7, 0xf5, 0x27, 0xad, 0x17, 0x60, + 0xf1, 0x26, 0x70, 0x6a, 0xe2, 
0x57, 0xfd, 0xda, 0x4c, 0xdc, 0xaf, 0xa1, 0x94, 0x46, 0x86, 0xa8, + 0xf8, 0x31, 0x9a, 0x8b, 0x02, 0xb0, 0xca, 0xb3, 0x12, 0xfd, 0x9a, 0x3e, 0xbd, 0x18, 0xfa, 0xd0, + 0xad, 0x83, 0x00, 0x2c, 0x73, 0x45, 0xc1, 0xce, 0x89, 0x13, 0x91, 0x20, 0xf5, 0x1f, 0x35, 0xb4, + 0x3a, 0x94, 0xda, 0x75, 0x22, 0x8e, 0x3f, 0x9d, 0x08, 0x40, 0x3f, 0x5b, 0x00, 0x42, 0x5b, 0xba, + 0x5f, 0x52, 0x76, 0x96, 0x06, 0x94, 0x8c, 0xf3, 0x8f, 0xd0, 0xbc, 0xc3, 0xc1, 0x8b, 0xca, 0xb3, + 0x1b, 0x85, 0xcd, 0xe2, 0x9d, 0xf7, 0xce, 0xe4, 0xbd, 0xb9, 0xaa, 0x10, 0xe7, 0x1b, 0x42, 0x97, + 0x24, 0x10, 0xf5, 0x3f, 0xb3, 0xbe, 0x8b, 0x98, 0xf0, 0x03, 0x74, 0x89, 0x72, 0x4e, 0xad, 0x23, + 0x02, 0x2f, 0x3b, 0x4e, 0x08, 0xb6, 0x8c, 0x60, 0xc9, 0xc4, 0x71, 0xbf, 0x76, 0x69, 0x6b, 0x84, + 0x43, 0xc6, 0x24, 0x85, 0x6e, 0xc0, 0xec, 0x86, 0xff, 0x9c, 0x3d, 0xf1, 0x9b, 0xac, 0xe3, 0x73, + 0x99, 0x60, 0xa5, 0xbb, 0x3f, 0xc2, 0x21, 0x63, 0x92, 0xd8, 0x42, 0xeb, 0x5d, 0xe6, 0x76, 0x3c, + 0xd8, 0x75, 0x9e, 0x83, 0xd5, 0xb3, 0x5c, 0x68, 0x32, 0x1b, 0xa2, 0x72, 0x61, 0xa3, 0xb0, 0xb9, + 0x6c, 0x1a, 0x71, 0xbf, 0xb6, 0xfe, 0x34, 0x87, 0x7f, 0xd2, 0xaf, 0x5d, 0xc9, 0xa1, 0x93, 0x5c, + 0xb0, 0xfa, 0x0f, 0x1a, 0x5a, 0xdc, 0x3e, 0x68, 0xec, 0x31, 0x1b, 0x2e, 0xa0, 0xcb, 0x1a, 0x23, + 0x5d, 0x76, 0xf5, 0x94, 0x3a, 0x09, 0xa7, 0xa6, 0xf6, 0xd8, 0x5f, 0x49, 0x9d, 0x84, 0x8c, 0x1a, + 0x92, 0x0d, 0x34, 0xe7, 0x53, 0x0f, 0xa4, 0xeb, 0xcb, 0xa9, 0xce, 0x1e, 0xf5, 0x80, 0x48, 0x0e, + 0x7e, 0x1f, 0x2d, 0xf8, 0xcc, 0x86, 0xc6, 0x43, 0xe9, 0xc0, 0xb2, 0x79, 0x49, 0xc9, 0x2c, 0xec, + 0x49, 0x2a, 0x51, 0x5c, 0x7c, 0x17, 0xad, 0x70, 0x16, 0x30, 0x97, 0xb5, 0x7b, 0x8f, 0xa1, 0x37, + 0xc8, 0x78, 0x29, 0xee, 0xd7, 0x56, 0x0e, 0x33, 0x74, 0x32, 0x22, 0x85, 0x5b, 0xa8, 0x48, 0x5d, + 0x97, 0x59, 0x94, 0xd3, 0x96, 0x0b, 0xe5, 0x39, 0x19, 0xa3, 0xf1, 0xae, 0x18, 0x93, 0x32, 0x09, + 0xe3, 0x04, 0x22, 0xd6, 0x09, 0x2d, 0x88, 0xcc, 0xcb, 0x71, 0xbf, 0x56, 0xdc, 0x4a, 0x71, 0x48, + 0x16, 0xb4, 0xfe, 0xbd, 0x86, 0x8a, 0x2a, 0xea, 0x0b, 0x98, 0xab, 0x8f, 0x46, 0xe7, 0xea, 0xff, + 0x67, 0xa8, 0xd7, 0x94, 0xa9, 0xb2, 0x86, 0x6e, 0xcb, 0x91, 0x3a, 0x44, 0x8b, 0xb6, 0x2c, 0x5a, + 0x54, 0xd6, 0x24, 0xf4, 0xb5, 0x33, 0x40, 0xab, 0xb1, 0xbd, 0xac, 0x0c, 0x2c, 0x26, 0xe7, 0x88, + 0x0c, 0xa0, 0xea, 0xdf, 0x2c, 0xa0, 0x95, 0x83, 0x44, 0x77, 0xdb, 0xa5, 0x51, 0x74, 0x01, 0x0d, + 0xfd, 0x01, 0x2a, 0x06, 0x21, 0xeb, 0x3a, 0x91, 0xc3, 0x7c, 0x08, 0x55, 0x5b, 0x5d, 0x51, 0x2a, + 0xc5, 0xfd, 0x94, 0x45, 0xb2, 0x72, 0xd8, 0x45, 0x28, 0xa0, 0x21, 0xf5, 0x80, 0x8b, 0x14, 0x14, + 0x64, 0x0a, 0xee, 0xbf, 0x2b, 0x05, 0xd9, 0xb0, 0xf4, 0xfd, 0xa1, 0xea, 0x8e, 0xcf, 0xc3, 0x5e, + 0xea, 0x62, 0xca, 0x20, 0x19, 0x7c, 0x7c, 0x8c, 0x56, 0x43, 0xb0, 0x5c, 0xea, 0x78, 0xfb, 0xcc, + 0x75, 0xac, 0x9e, 0x6c, 0xcd, 0x65, 0x73, 0x27, 0xee, 0xd7, 0x56, 0x49, 0x96, 0x71, 0xd2, 0xaf, + 0xdd, 0x9a, 0xbc, 0x3a, 0xf5, 0x7d, 0x08, 0x23, 0x27, 0xe2, 0xe0, 0xf3, 0xa4, 0x61, 0x47, 0x74, + 0xc8, 0x28, 0xb6, 0x98, 0x1d, 0x4f, 0xac, 0xaf, 0x27, 0x01, 0x77, 0x98, 0x1f, 0x95, 0xe7, 0xd3, + 0xd9, 0x69, 0x66, 0xe8, 0x64, 0x44, 0x0a, 0xef, 0xa2, 0x75, 0xd1, 0xe6, 0x9f, 0x27, 0x06, 0x76, + 0xbe, 0x08, 0xa8, 0x2f, 0x52, 0x55, 0x5e, 0x90, 0xdb, 0xb2, 0x2c, 0x76, 0xdd, 0x56, 0x0e, 0x9f, + 0xe4, 0x6a, 0xe1, 0x4f, 0xd0, 0x5a, 0xb2, 0xec, 0x4c, 0xc7, 0xb7, 0x1d, 0xbf, 0x2d, 0x56, 0x5d, + 0x79, 0x51, 0x06, 0x7d, 0x3d, 0xee, 0xd7, 0xd6, 0x9e, 0x8e, 0x33, 0x4f, 0xf2, 0x88, 0x64, 0x12, + 0x04, 0xbf, 0x44, 0x6b, 0xd2, 0x22, 0xd8, 0x6a, 0x11, 0x38, 0x10, 0x95, 0x97, 0x64, 0xfd, 0x36, + 0xb3, 0xf5, 0x13, 0xa9, 0x13, 0x8d, 0x34, 0x58, 0x17, 
0x07, 0xe0, 0x82, 0xc5, 0x59, 0x78, 0x08, + 0xa1, 0x67, 0xfe, 0x4f, 0xd5, 0x6b, 0x6d, 0x6b, 0x1c, 0x8a, 0x4c, 0xa2, 0x57, 0x3e, 0x44, 0x97, + 0xc7, 0x0a, 0x8e, 0x4b, 0xa8, 0x70, 0x0c, 0xbd, 0x64, 0xd1, 0x11, 0xf1, 0x89, 0xd7, 0xd1, 0x7c, + 0x97, 0xba, 0x1d, 0x48, 0x3a, 0x90, 0x24, 0x87, 0x07, 0xb3, 0xf7, 0xb5, 0xfa, 0xcf, 0x1a, 0x2a, + 0x65, 0xbb, 0xe7, 0x02, 0xd6, 0x46, 0x73, 0x74, 0x6d, 0x6c, 0x9e, 0xb5, 0xb1, 0xa7, 0xec, 0x8e, + 0xef, 0x66, 0x51, 0x29, 0x29, 0x4e, 0x72, 0xd9, 0x7a, 0xe0, 0xf3, 0x0b, 0x18, 0x6d, 0x32, 0x72, + 0x57, 0xdd, 0x3a, 0x7d, 0x8f, 0xa7, 0xde, 0x4d, 0xbb, 0xb4, 0xf0, 0x33, 0xb4, 0x10, 0x71, 0xca, + 0x3b, 0x62, 0xe6, 0x05, 0xea, 0x9d, 0x73, 0xa1, 0x4a, 0xcd, 0xf4, 0xd2, 0x4a, 0xce, 0x44, 0x21, + 0xd6, 0x7f, 0xd1, 0xd0, 0xfa, 0xb8, 0xca, 0x05, 0x14, 0xfb, 0xe3, 0xd1, 0x62, 0xdf, 0x38, 0x4f, + 0x44, 0x53, 0x0a, 0xfe, 0x9b, 0x86, 0xfe, 0x33, 0x11, 0xbc, 0xbc, 0x1e, 0xc5, 0x9e, 0x08, 0xc6, + 0xb6, 0xd1, 0x5e, 0x7a, 0xe7, 0xcb, 0x3d, 0xb1, 0x9f, 0xc3, 0x27, 0xb9, 0x5a, 0xf8, 0x05, 0x2a, + 0x39, 0xbe, 0xeb, 0xf8, 0x90, 0xd0, 0x0e, 0xd2, 0x72, 0xe7, 0x0e, 0xf3, 0x38, 0xb2, 0x2c, 0xf3, + 0x7a, 0xdc, 0xaf, 0x95, 0x1a, 0x63, 0x28, 0x64, 0x02, 0xb7, 0xfe, 0x6b, 0x4e, 0x79, 0xe4, 0x5d, + 0x78, 0x03, 0x2d, 0x25, 0x8f, 0x46, 0x08, 0x55, 0x18, 0xc3, 0x74, 0x6f, 0x29, 0x3a, 0x19, 0x4a, + 0xc8, 0x0e, 0x92, 0xa9, 0x50, 0x8e, 0x9e, 0xaf, 0x83, 0xa4, 0x66, 0xa6, 0x83, 0xe4, 0x99, 0x28, + 0x44, 0xe1, 0x89, 0x78, 0x00, 0xc9, 0x84, 0x16, 0x46, 0x3d, 0xd9, 0x53, 0x74, 0x32, 0x94, 0xa8, + 0xff, 0x5d, 0xc8, 0xa9, 0x92, 0x6c, 0xc5, 0x4c, 0x48, 0x83, 0xb7, 0xf2, 0x78, 0x48, 0xf6, 0x30, + 0x24, 0x1b, 0x7f, 0xab, 0x21, 0x4c, 0x87, 0x10, 0xcd, 0x41, 0xab, 0x26, 0xfd, 0xf4, 0xe8, 0xfc, + 0x13, 0xa2, 0x6f, 0x4d, 0x80, 0x25, 0xf7, 0x64, 0x45, 0x39, 0x81, 0x27, 0x05, 0x48, 0x8e, 0x07, + 0xd8, 0x41, 0xc5, 0x84, 0xba, 0x13, 0x86, 0x2c, 0x54, 0x23, 0x7b, 0xf5, 0x74, 0x87, 0xa4, 0xb8, + 0x59, 0x95, 0x0f, 0xb9, 0x54, 0xff, 0xa4, 0x5f, 0x2b, 0x66, 0xf8, 0x24, 0x8b, 0x2d, 0x4c, 0xd9, + 0x90, 0x9a, 0x9a, 0xfb, 0x17, 0xa6, 0x1e, 0xc2, 0x74, 0x53, 0x19, 0xec, 0xca, 0x0e, 0xfa, 0xef, + 0x94, 0x04, 0x9d, 0xeb, 0x5e, 0xf9, 0x4a, 0x43, 0x59, 0x1b, 0x78, 0x17, 0xcd, 0x89, 0xff, 0x59, + 0xb5, 0x61, 0xae, 0x9f, 0x6d, 0xc3, 0x1c, 0x3a, 0x1e, 0xa4, 0x8b, 0x52, 0x9c, 0x88, 0x44, 0xc1, + 0xd7, 0xd0, 0xa2, 0x07, 0x51, 0x44, 0xdb, 0xca, 0x72, 0xfa, 0xea, 0x6b, 0x26, 0x64, 0x32, 0xe0, + 0xd7, 0xef, 0xa1, 0x2b, 0x39, 0xef, 0x68, 0x5c, 0x43, 0xf3, 0x96, 0xfc, 0xe1, 0x12, 0x0e, 0xcd, + 0x9b, 0xcb, 0x62, 0xcb, 0x6c, 0xcb, 0xff, 0xac, 0x84, 0x6e, 0xde, 0x7c, 0xf5, 0xb6, 0x3a, 0xf3, + 0xfa, 0x6d, 0x75, 0xe6, 0xcd, 0xdb, 0xea, 0xcc, 0x97, 0x71, 0x55, 0x7b, 0x15, 0x57, 0xb5, 0xd7, + 0x71, 0x55, 0x7b, 0x13, 0x57, 0xb5, 0xdf, 0xe3, 0xaa, 0xf6, 0xf5, 0x1f, 0xd5, 0x99, 0x67, 0x8b, + 0x2a, 0xdf, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x72, 0xff, 0xde, 0x2e, 0xe4, 0x10, 0x00, 0x00, +} + func (m *CSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -154,33 +618,42 @@ func (m *CSIDriver) Marshal() (dAtA []byte, err error) { } func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -188,37 +661,46 @@ func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { } func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -226,37 +708,51 @@ func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { } func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.AttachRequired != nil { - dAtA[i] = 0x8 - i++ - if *m.AttachRequired { + if len(m.VolumeLifecycleModes) > 0 { + for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumeLifecycleModes[iNdEx]) + copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.PodInfoOnMount != nil { + i-- + if *m.PodInfoOnMount { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PodInfoOnMount != nil { + i-- dAtA[i] = 0x10 - i++ - if *m.PodInfoOnMount { + } + if m.AttachRequired != nil { + i-- + if *m.AttachRequired { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *CSINode) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -264,33 +760,42 @@ func (m *CSINode) Marshal() (dAtA []byte, err error) { } func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -298,40 +803,53 @@ func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { } func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) - i += copy(dAtA[i:], m.NodeID) + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -339,37 +857,46 @@ func (m *CSINodeList) Marshal() (dAtA []byte, err error) { } func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := 
m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -377,29 +904,36 @@ func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { } func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Drivers) > 0 { - for _, msg := range m.Drivers { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -407,100 +941,108 @@ func (m *StorageClass) Marshal() (dAtA []byte, err error) { } func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], 
*m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x42 } } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a + } if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ + i-- if *m.AllowVolumeExpansion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 + } + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StorageClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -508,37 +1050,46 @@ func (m *StorageClassList) Marshal() (dAtA []byte, err error) { } func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -546,41 +1097,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -588,37 +1150,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { 
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -626,23 +1197,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -650,33 +1239,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n13, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -684,67 +1282,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n14, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n15, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -752,35 +1361,76 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n16, err := m.Time.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) 
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n16 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -791,6 +1441,9 @@ func (m *CSIDriver) Size() (n int) { } func (m *CSIDriverList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -805,6 +1458,9 @@ func (m *CSIDriverList) Size() (n int) { } func (m *CSIDriverSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.AttachRequired != nil { @@ -813,10 +1469,19 @@ func (m *CSIDriverSpec) Size() (n int) { if m.PodInfoOnMount != nil { n += 2 } + if len(m.VolumeLifecycleModes) > 0 { + for _, s := range m.VolumeLifecycleModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -827,6 +1492,9 @@ func (m *CSINode) Size() (n int) { } func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -839,10 +1507,17 @@ func (m *CSINodeDriver) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -857,6 +1532,9 @@ func (m *CSINodeList) Size() (n int) { } func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Drivers) > 0 { @@ -869,6 +1547,9 @@ func (m *CSINodeSpec) Size() (n int) { } func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -910,6 +1591,9 @@ func (m *StorageClass) Size() (n int) { } func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -924,6 +1608,9 @@ func (m *StorageClassList) Size() (n int) { } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -936,6 +1623,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -950,16 +1640,26 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *VolumeAttachmentSpec) Size() (n int) { + if 
m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -972,6 +1672,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -995,6 +1698,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -1004,16 +1710,21 @@ func (m *VolumeError) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } @@ -1022,7 +1733,7 @@ func (this *CSIDriver) String() string { return "nil" } s := strings.Join([]string{`&CSIDriver{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1032,9 +1743,14 @@ func (this *CSIDriverList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CSIDriver{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CSIDriverList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1046,6 +1762,7 @@ func (this *CSIDriverSpec) String() string { s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, + `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `}`, }, "") return s @@ -1055,7 +1772,7 @@ func (this *CSINode) String() string { return "nil" } s := strings.Join([]string{`&CSINode{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1069,6 +1786,7 @@ func (this *CSINodeDriver) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), 
"VolumeNodeResources", "VolumeNodeResources", 1) + `,`, `}`, }, "") return s @@ -1077,9 +1795,14 @@ func (this *CSINodeList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CSINodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSINode", "CSINode", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1088,8 +1811,13 @@ func (this *CSINodeSpec) String() string { if this == nil { return "nil" } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" s := strings.Join([]string{`&CSINodeSpec{`, - `Drivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Drivers), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + `,`, + `Drivers:` + repeatedStringForDrivers + `,`, `}`, }, "") return s @@ -1098,6 +1826,11 @@ func (this *StorageClass) String() string { if this == nil { return "nil" } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" keysForParameters := make([]string, 0, len(this.Parameters)) for k := range this.Parameters { keysForParameters = append(keysForParameters, k) @@ -1109,14 +1842,14 @@ func (this *StorageClass) String() string { } mapStringForParameters += "}" s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, `Parameters:` + mapStringForParameters + `,`, `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, `}`, }, "") return s @@ -1125,9 +1858,14 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1137,7 +1875,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1148,9 +1886,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1161,6 +1904,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -1194,8 +1938,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -1205,12 +1949,22 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, 
`Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") return s } +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1234,7 +1988,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1262,7 +2016,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1271,6 +2025,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1292,7 +2049,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1301,6 +2058,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1317,6 +2077,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1344,7 +2107,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1372,7 +2135,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1381,6 +2144,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1402,7 +2168,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1411,6 +2177,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1428,6 +2197,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1455,7 +2227,7 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1483,7 +2255,7 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1494,7 +2266,28 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: 
wrong wireType = %d for field PodInfoOnMount", wireType) } - var v int + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PodInfoOnMount = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1504,13 +2297,24 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.PodInfoOnMount = &b + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1520,6 +2324,9 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1547,7 +2354,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1575,7 +2382,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1584,6 +2391,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1605,7 +2415,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1614,6 +2424,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1630,6 +2443,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1657,7 +2473,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1685,7 +2501,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1695,6 +2511,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1714,7 +2533,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1724,6 +2543,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1743,7 +2565,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1753,11 +2575,50 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1767,6 +2628,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1794,7 +2658,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1822,7 +2686,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1831,6 +2695,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1852,7 +2719,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1861,6 +2728,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1878,6 +2748,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1905,7 +2778,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-1933,7 +2806,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1942,6 +2815,9 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1959,6 +2835,9 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1986,7 +2865,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2014,7 +2893,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2023,6 +2902,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2044,7 +2926,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2054,6 +2936,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2073,7 +2958,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2082,6 +2967,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2102,7 +2990,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2119,7 +3007,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2129,6 +3017,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2145,7 +3036,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2155,6 +3046,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -2191,7 +3085,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2201,6 +3095,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2221,7 +3118,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2231,6 +3128,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2250,7 +3150,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2271,7 +3171,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2281,6 +3181,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2301,7 +3204,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2310,10 +3213,13 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2327,6 +3233,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2354,7 +3263,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2382,7 +3291,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2391,6 +3300,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2412,7 +3324,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2421,6 +3333,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2438,6 +3353,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2465,7 +3383,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2493,7 +3411,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2502,6 +3420,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2523,7 +3444,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2532,6 +3453,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2553,7 +3477,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2562,6 +3486,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2578,6 +3505,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2605,7 +3535,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2633,7 +3563,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2642,6 +3572,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2663,7 +3596,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2672,6 +3605,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2689,6 +3625,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2716,7 +3655,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2744,7 +3683,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2754,12 +3693,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2769,6 +3747,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2796,7 +3777,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2824,7 +3805,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2834,6 +3815,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2853,7 +3837,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2862,6 +3846,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2883,7 +3870,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2893,6 +3880,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2907,6 +3897,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2934,7 +3927,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2962,7 +3955,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2982,7 +3975,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2991,6 +3984,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3011,7 +4007,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3028,7 +4024,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3038,6 +4034,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -3054,7 +4053,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3064,6 +4063,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -3100,7 +4102,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3109,6 +4111,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3133,7 +4138,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3142,6 +4147,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3161,6 +4169,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3188,7 +4199,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3216,7 +4227,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3225,6 +4236,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3246,7 +4260,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3256,6 +4270,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3270,6 +4287,82 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3336,10 +4429,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3368,6 +4464,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3386,87 +4485,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1216 bytes of a 
gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x4e, 0x9c, 0x8c, 0x93, 0x36, 0x59, 0x22, 0x30, 0x3e, 0xd8, 0x91, 0x11, 0x34, - 0xad, 0xda, 0x75, 0x1b, 0x15, 0x14, 0x55, 0xe2, 0x60, 0x27, 0x91, 0x70, 0x1b, 0x27, 0x66, 0x12, - 0x55, 0xa8, 0xe2, 0xc0, 0x64, 0xf7, 0xc5, 0x59, 0xec, 0xdd, 0xd9, 0xce, 0x8e, 0x0d, 0xbe, 0x71, - 0x82, 0x23, 0x88, 0x03, 0x9f, 0x80, 0xaf, 0x00, 0x12, 0x5c, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, - 0x8b, 0x98, 0x6f, 0x11, 0x71, 0x40, 0x33, 0x3b, 0xf6, 0xee, 0xfa, 0x4f, 0xe3, 0x70, 0xf0, 0xcd, - 0xf3, 0xde, 0xfb, 0xfd, 0xde, 0xdf, 0x79, 0xb3, 0x46, 0xbb, 0x8d, 0x1d, 0xdf, 0xb0, 0x69, 0xb1, - 0xd1, 0x3a, 0x05, 0xe6, 0x02, 0x07, 0xbf, 0xd8, 0x06, 0xd7, 0xa2, 0xac, 0xa8, 0x14, 0xc4, 0xb3, - 0x8b, 0x3e, 0xa7, 0x8c, 0xd4, 0xa1, 0xd8, 0x7e, 0x74, 0x0a, 0x9c, 0x3c, 0x2a, 0xd6, 0xc1, 0x05, - 0x46, 0x38, 0x58, 0x86, 0xc7, 0x28, 0xa7, 0x7a, 0x36, 0xb0, 0x35, 0x88, 0x67, 0x1b, 0xca, 0xd6, - 0x50, 0xb6, 0xd9, 0x07, 0x75, 0x9b, 0x9f, 0xb7, 0x4e, 0x0d, 0x93, 0x3a, 0xc5, 0x3a, 0xad, 0xd3, - 0xa2, 0x84, 0x9c, 0xb6, 0xce, 0xe4, 0x49, 0x1e, 0xe4, 0xaf, 0x80, 0x2a, 0x5b, 0x88, 0xb8, 0x35, - 0x29, 0x13, 0x3e, 0x87, 0xdd, 0x65, 0x1f, 0x87, 0x36, 0x0e, 0x31, 0xcf, 0x6d, 0x17, 0x58, 0xa7, - 0xe8, 0x35, 0xea, 0x42, 0xe0, 0x17, 0x1d, 0xe0, 0x64, 0x1c, 0xaa, 0x38, 0x09, 0xc5, 0x5a, 0x2e, - 0xb7, 0x1d, 0x18, 0x01, 0x7c, 0x74, 0x1d, 0xc0, 0x37, 0xcf, 0xc1, 0x21, 0xc3, 0xb8, 0xc2, 0xef, - 0x1a, 0x5a, 0xde, 0x3d, 0xae, 0xec, 0x31, 0xbb, 0x0d, 0x4c, 0xff, 0x02, 0x2d, 0x89, 0x88, 0x2c, - 0xc2, 0x49, 0x46, 0xdb, 0xd4, 0xb6, 0xd2, 0xdb, 0x0f, 0x8d, 0xb0, 0x5c, 0x03, 0x62, 0xc3, 0x6b, - 0xd4, 0x85, 0xc0, 0x37, 0x84, 0xb5, 0xd1, 0x7e, 0x64, 0x1c, 0x9d, 0x7e, 0x09, 0x26, 0xaf, 0x02, - 0x27, 0x65, 0xfd, 0xa2, 0x9b, 0x9f, 0xeb, 0x75, 0xf3, 0x28, 0x94, 0xe1, 0x01, 0xab, 0xfe, 0x0c, - 0x25, 0x7d, 0x0f, 0xcc, 0xcc, 0xbc, 0x64, 0xbf, 0x6b, 0x4c, 0x6e, 0x86, 0x31, 0x08, 0xeb, 0xd8, - 0x03, 0xb3, 0xbc, 0xa2, 0x68, 0x93, 0xe2, 0x84, 0x25, 0x49, 0xe1, 0x37, 0x0d, 0xad, 0x0e, 0xac, - 0x0e, 0x6c, 0x9f, 0xeb, 0x9f, 0x8f, 0x24, 0x60, 0x4c, 0x97, 0x80, 0x40, 0xcb, 0xf0, 0xd7, 0x94, - 0x9f, 0xa5, 0xbe, 0x24, 0x12, 0xfc, 0x53, 0xb4, 0x60, 0x73, 0x70, 0xfc, 0xcc, 0xfc, 0x66, 0x62, - 0x2b, 0xbd, 0xfd, 0xfe, 0x54, 0xd1, 0x97, 0x57, 0x15, 0xe3, 0x42, 0x45, 0x60, 0x71, 0x40, 0x51, - 0xf8, 0x2e, 0x1a, 0xbb, 0xc8, 0x49, 0x7f, 0x82, 0x6e, 0x11, 0xce, 0x89, 0x79, 0x8e, 0xe1, 0x65, - 0xcb, 0x66, 0x60, 0xc9, 0x0c, 0x96, 0xca, 0x7a, 0xaf, 0x9b, 0xbf, 0x55, 0x8a, 0x69, 0xf0, 0x90, - 0xa5, 0xc0, 0x7a, 0xd4, 0xaa, 0xb8, 0x67, 0xf4, 0xc8, 0xad, 0xd2, 0x96, 0xcb, 0x65, 0x81, 0x15, - 0xb6, 0x16, 0xd3, 0xe0, 0x21, 0xcb, 0xc2, 0xaf, 0x1a, 0x4a, 0xed, 0x1e, 0x57, 0x0e, 0xa9, 0x05, - 0x33, 0x18, 0x80, 0x4a, 0x6c, 0x00, 0xee, 0x5c, 0x53, 0x42, 0x11, 0xd4, 0xc4, 0xf6, 0x7f, 0x1f, - 0x94, 0x50, 0xd8, 0xa8, 0xf9, 0xdd, 0x44, 0x49, 0x97, 0x38, 0x20, 0x43, 0x5f, 0x0e, 0x31, 0x87, - 0xc4, 0x01, 0x2c, 0x35, 0xfa, 0x07, 0x68, 0xd1, 0xa5, 0x16, 0x54, 0xf6, 0x64, 0x00, 0xcb, 0xe5, - 0x5b, 0xca, 0x66, 0xf1, 0x50, 0x4a, 0xb1, 0xd2, 0xea, 0x8f, 0xd1, 0x0a, 0xa7, 0x1e, 0x6d, 0xd2, - 0x7a, 0xe7, 0x19, 0x74, 0xfc, 0x4c, 0x62, 0x33, 0xb1, 0xb5, 0x5c, 0x5e, 0xeb, 0x75, 0xf3, 0x2b, - 0x27, 0x11, 0x39, 0x8e, 0x59, 0x15, 0x7e, 0xd1, 0x50, 0x5a, 0x45, 0x34, 0x83, 0x71, 0xfc, 0x24, - 0x3e, 0x8e, 0xef, 0x4d, 0x51, 0xcb, 0x09, 0xc3, 0x68, 0x0e, 0xc2, 0x96, 0x93, 0x78, 0x82, 0x52, - 0x96, 0x2c, 0xa8, 0x9f, 0xd1, 0x24, 0xf5, 0xdd, 0x29, 0xa8, 0xd5, 0xb4, 0xdf, 0x56, 0x0e, 
0x52, - 0xc1, 0xd9, 0xc7, 0x7d, 0xaa, 0xc2, 0x8f, 0x8b, 0x68, 0xe5, 0x38, 0xc0, 0xee, 0x36, 0x89, 0xef, - 0xcf, 0x60, 0xd8, 0x3e, 0x44, 0x69, 0x8f, 0xd1, 0xb6, 0xed, 0xdb, 0xd4, 0x05, 0xa6, 0x5a, 0xfe, - 0x96, 0x82, 0xa4, 0x6b, 0xa1, 0x0a, 0x47, 0xed, 0xf4, 0x26, 0x42, 0x1e, 0x61, 0xc4, 0x01, 0x2e, - 0x4a, 0x90, 0x90, 0x25, 0xd8, 0x79, 0x53, 0x09, 0xa2, 0x69, 0x19, 0xb5, 0x01, 0x74, 0xdf, 0xe5, - 0xac, 0x13, 0x86, 0x18, 0x2a, 0x70, 0x84, 0x5f, 0x6f, 0xa0, 0x55, 0x06, 0x66, 0x93, 0xd8, 0x4e, - 0x8d, 0x36, 0x6d, 0xb3, 0x93, 0x49, 0xca, 0x30, 0xf7, 0x7b, 0xdd, 0xfc, 0x2a, 0x8e, 0x2a, 0xae, - 0xba, 0xf9, 0x87, 0xa3, 0x2f, 0x8e, 0x51, 0x03, 0xe6, 0xdb, 0x3e, 0x07, 0x97, 0x3f, 0xa7, 0xcd, - 0x96, 0x03, 0x31, 0x0c, 0x8e, 0x73, 0x8b, 0xb9, 0x76, 0xc4, 0xad, 0x3f, 0xf2, 0xb8, 0x4d, 0x5d, - 0x3f, 0xb3, 0x10, 0xce, 0x75, 0x35, 0x22, 0xc7, 0x31, 0x2b, 0xfd, 0x00, 0x6d, 0x90, 0x66, 0x93, - 0x7e, 0x15, 0x38, 0xd8, 0xff, 0xda, 0x23, 0xae, 0x28, 0x55, 0x66, 0x51, 0x2e, 0x99, 0x4c, 0xaf, - 0x9b, 0xdf, 0x28, 0x8d, 0xd1, 0xe3, 0xb1, 0x28, 0xfd, 0x33, 0xb4, 0xde, 0x96, 0xa2, 0xb2, 0xed, - 0x5a, 0xb6, 0x5b, 0xaf, 0x52, 0x0b, 0x32, 0x29, 0x99, 0xf4, 0xbd, 0x5e, 0x37, 0xbf, 0xfe, 0x7c, - 0x58, 0x79, 0x35, 0x4e, 0x88, 0x47, 0x49, 0xf4, 0x97, 0x68, 0x5d, 0x7a, 0x04, 0x4b, 0x5d, 0x52, - 0x1b, 0xfc, 0xcc, 0x92, 0xec, 0xdf, 0x56, 0xb4, 0x7f, 0xa2, 0x74, 0x62, 0x90, 0xfa, 0x57, 0xf9, - 0x18, 0x9a, 0x60, 0x72, 0xca, 0x4e, 0x80, 0x39, 0xe5, 0x77, 0x55, 0xbf, 0xd6, 0x4b, 0xc3, 0x54, - 0x78, 0x94, 0x3d, 0xfb, 0x31, 0xba, 0x3d, 0xd4, 0x70, 0x7d, 0x0d, 0x25, 0x1a, 0xd0, 0x09, 0x96, - 0x10, 0x16, 0x3f, 0xf5, 0x0d, 0xb4, 0xd0, 0x26, 0xcd, 0x16, 0x04, 0x13, 0x88, 0x83, 0xc3, 0x93, - 0xf9, 0x1d, 0xad, 0xf0, 0x87, 0x86, 0xd6, 0xa2, 0xd3, 0x33, 0x83, 0xb5, 0x51, 0x8d, 0xaf, 0x8d, - 0xad, 0x69, 0x07, 0x7b, 0xc2, 0xee, 0xf8, 0x79, 0x1e, 0xad, 0x05, 0xcd, 0x09, 0xde, 0x28, 0x07, - 0x5c, 0x3e, 0x83, 0xab, 0x8d, 0x63, 0xef, 0xc8, 0xc3, 0x37, 0x25, 0x31, 0x1c, 0xdd, 0xa4, 0x07, - 0x45, 0x7f, 0x81, 0x16, 0x7d, 0x4e, 0x78, 0x4b, 0xdc, 0x79, 0xc1, 0xba, 0x7d, 0x23, 0x56, 0x89, - 0x0c, 0x1f, 0x94, 0xe0, 0x8c, 0x15, 0x63, 0xe1, 0x4f, 0x0d, 0x6d, 0x0c, 0x43, 0x66, 0xd0, 0xec, - 0x4f, 0xe3, 0xcd, 0xbe, 0x7f, 0x93, 0x8c, 0x26, 0x34, 0xfc, 0x0c, 0xbd, 0x3d, 0x92, 0x3b, 0x6d, - 0x31, 0x13, 0xc4, 0x9a, 0xf0, 0x86, 0x96, 0xd1, 0x61, 0xf8, 0x1c, 0xcb, 0x35, 0x51, 0x1b, 0xa3, - 0xc7, 0x63, 0x51, 0x85, 0xbf, 0xc6, 0x54, 0x4c, 0x3e, 0x4f, 0xf7, 0xd1, 0x52, 0xf0, 0xf9, 0x03, - 0x4c, 0x51, 0x0f, 0x2a, 0x50, 0x52, 0x72, 0x3c, 0xb0, 0x90, 0x4d, 0x95, 0xe1, 0xa9, 0x51, 0xb9, - 0x59, 0x53, 0x25, 0x32, 0xd2, 0x54, 0x79, 0xc6, 0x8a, 0x51, 0x44, 0x22, 0xbe, 0x17, 0x64, 0x92, - 0x89, 0x78, 0x24, 0x87, 0x4a, 0x8e, 0x07, 0x16, 0x85, 0x7f, 0x13, 0x63, 0x2a, 0x27, 0xa7, 0x23, - 0x92, 0x52, 0xff, 0xab, 0x6f, 0x38, 0x25, 0x6b, 0x90, 0x92, 0xa5, 0xff, 0xa4, 0x21, 0x9d, 0x0c, - 0x28, 0xaa, 0xfd, 0xe9, 0x09, 0x5a, 0xfc, 0xf4, 0xe6, 0x43, 0x6b, 0x94, 0x46, 0xc8, 0x82, 0xa7, - 0x2b, 0xab, 0x82, 0xd0, 0x47, 0x0d, 0xf0, 0x98, 0x08, 0x74, 0x1b, 0xa5, 0x03, 0xe9, 0x3e, 0x63, - 0x94, 0xa9, 0x5b, 0x74, 0xe7, 0xfa, 0x80, 0xa4, 0x79, 0x39, 0x27, 0x1e, 0xe5, 0x52, 0x88, 0xbf, - 0xea, 0xe6, 0xd3, 0x11, 0x3d, 0x8e, 0x72, 0x0b, 0x57, 0x16, 0x84, 0xae, 0x92, 0xff, 0xc3, 0xd5, - 0x1e, 0x4c, 0x76, 0x15, 0xe1, 0xce, 0xee, 0xa3, 0x77, 0x26, 0x14, 0xe8, 0x46, 0xab, 0xfe, 0x5b, - 0x0d, 0x45, 0x7d, 0xe8, 0x07, 0x28, 0x29, 0xfe, 0x99, 0xa9, 0x4b, 0x7f, 0x6f, 0xba, 0x4b, 0x7f, - 0x62, 0x3b, 0x10, 0xee, 0x2e, 0x71, 0xc2, 0x92, 0x45, 0xbf, 0x8b, 0x52, 0x0e, 0xf8, 0x3e, 0xa9, - 0x2b, 0xcf, 0xe1, 
0x87, 0x58, 0x35, 0x10, 0xe3, 0xbe, 0xbe, 0xfc, 0xe0, 0xe2, 0x32, 0x37, 0xf7, - 0xea, 0x32, 0x37, 0xf7, 0xfa, 0x32, 0x37, 0xf7, 0x4d, 0x2f, 0xa7, 0x5d, 0xf4, 0x72, 0xda, 0xab, - 0x5e, 0x4e, 0x7b, 0xdd, 0xcb, 0x69, 0x7f, 0xf7, 0x72, 0xda, 0x0f, 0xff, 0xe4, 0xe6, 0x5e, 0xa4, - 0x54, 0xdd, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x78, 0xdc, 0x5e, 0x79, 0x76, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 6d9fe904..bb2cf345 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -45,7 +45,7 @@ message CSIDriver { // The driver name must be 63 characters or less, beginning and ending with // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the CSI Driver. @@ -55,7 +55,7 @@ message CSIDriver { // CSIDriverList is a collection of CSIDriver objects. message CSIDriverList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -93,8 +93,32 @@ message CSIDriverSpec { // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. // +optional optional bool podInfoOnMount = 2; + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + repeated string volumeLifecycleModes = 3; } // CSINode holds information about all CSI drivers installed on a node. @@ -144,12 +168,16 @@ message CSINodeDriver { // This can be empty if driver does not support topology. 
// +optional repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + optional VolumeNodeResources allocatable = 4; } // CSINodeList is a collection of CSINode objects. message CSINodeList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -173,7 +201,7 @@ message CSINodeSpec { // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -217,7 +245,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -231,7 +259,7 @@ message StorageClassList { // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -249,7 +277,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -265,6 +293,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -321,3 +358,13 @@ message VolumeError { optional string message = 2; } +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. 
+ // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 49005791..fa1bae1d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -166,7 +166,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. @@ -232,7 +239,7 @@ type CSIDriver struct { // The driver name must be 63 characters or less, beginning and ending with // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the CSI Driver. @@ -246,7 +253,7 @@ type CSIDriverList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -284,10 +291,59 @@ type CSIDriverSpec struct { // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` } +// VolumeLifecycleMode is an enumeration of possible usage modes for a volume +// provided by a CSI driver. More modes may be added in the future. +type VolumeLifecycleMode string + +const ( + // VolumeLifecyclePersistent explicitly confirms that the driver implements + // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not + // set. Such volumes are managed in Kubernetes via the persistent volume + // claim mechanism and have a lifecycle that is independent of the pods which + // use them. + VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" + + // VolumeLifecycleEphemeral indicates that the driver can be used for + // ephemeral inline volumes. Such volumes are specified inside the pod + // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have + // a lifecycle that is tied to the lifecycle of the pod. 
For example, such + // a volume might contain data that gets created specifically for that pod, + // like secrets. + // But how the volume actually gets created and managed is entirely up to + // the driver. It might also use reference counting to share the same volume + // instance among different pods if the CSIVolumeSource of those pods is + // identical. + VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -350,6 +406,20 @@ type CSINodeDriver struct { // This can be empty if driver does not support topology. // +optional TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -359,7 +429,7 @@ type CSINodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ec741ecf..fe80a8e5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", - "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. 
metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the CSI Driver.", } @@ -39,7 +39,7 @@ func (CSIDriver) SwaggerDoc() map[string]string { var map_CSIDriverList = map[string]string{ "": "CSIDriverList is a collection of CSIDriver objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CSIDriver", } @@ -48,9 +48,10 @@ func (CSIDriverList) SwaggerDoc() map[string]string { } var map_CSIDriverSpec = map[string]string{ - "": "CSIDriverSpec is the specification of a CSIDriver.", - "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID)", + "": "CSIDriverSpec is the specification of a CSIDriver.", + "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", + "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. 
If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -72,6 +73,7 @@ var map_CSINodeDriver = map[string]string{ "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. 
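// [Editor's note] Illustrative sketch only, not part of the vendored diff: the new
// CSINodeDriver.Allocatable / VolumeNodeResources fields (documented in the
// swagger entries just below) as a driver registration would populate them.
// Driver, node and topology names are hypothetical.
package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	node := storagev1beta1.CSINode{
		ObjectMeta: metav1.ObjectMeta{Name: "node1"},
		Spec: storagev1beta1.CSINodeSpec{
			Drivers: []storagev1beta1.CSINodeDriver{{
				Name:         "ebs.csi.example.com",
				NodeID:       "nodeA", // the storage system's own name for "node1"
				TopologyKeys: []string{"topology.example.com/zone"},
				// The scheduler may place at most 25 volumes of this driver on
				// the node; a nil Allocatable (or nil Count) means unbounded.
				Allocatable: &storagev1beta1.VolumeNodeResources{Count: int32Ptr(25)},
			}},
		},
	}
	fmt.Println(*node.Spec.Drivers[0].Allocatable.Count)
}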
This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", } func (CSINodeDriver) SwaggerDoc() map[string]string { @@ -80,7 +82,7 @@ func (CSINodeDriver) SwaggerDoc() map[string]string { var map_CSINodeList = map[string]string{ "": "CSINodeList is a collection of CSINode objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CSINode", } @@ -99,7 +101,7 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", @@ -115,7 +117,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -125,7 +127,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. 
the external-attacher.", } @@ -136,7 +138,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } @@ -186,4 +188,13 @@ func (VolumeError) SwaggerDoc() map[string]string { return map_VolumeError } +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 022815f1..52433fcd 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -56,7 +56,7 @@ func (in *CSIDriver) DeepCopyObject() runtime.Object { func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CSIDriver, len(*in)) @@ -98,6 +98,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(bool) **out = **in } + if in.VolumeLifecycleModes != nil { + in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes + *out = make([]VolumeLifecycleMode, len(*in)) + copy(*out, *in) + } return } @@ -146,6 +151,11 @@ func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } return } @@ -163,7 +173,7 @@ func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CSINode, len(*in)) @@ -279,7 +289,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -340,7 +350,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -377,6 +387,11 @@ 
func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } @@ -456,3 +471,24 @@ func (in *VolumeError) DeepCopy() *VolumeError { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. +func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go index 37b4d1df..3c7ac006 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go @@ -258,5 +258,31 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return out } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go index 964a6190..52d6ea86 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go @@ -27,18 +27,19 @@ var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() // SetCRDCondition sets the status condition. It either overwrites the existing one or creates a new one. 
func SetCRDCondition(crd *CustomResourceDefinition, newCondition CustomResourceDefinitionCondition) { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + existingCondition := FindCRDCondition(crd, newCondition.Type) if existingCondition == nil { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) crd.Status.Conditions = append(crd.Status.Conditions, newCondition) return } - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status + if existingCondition.Status != newCondition.Status || existingCondition.LastTransitionTime.IsZero() { existingCondition.LastTransitionTime = newCondition.LastTransitionTime } + existingCondition.Status = newCondition.Status existingCondition.Reason = newCondition.Reason existingCondition.Message = newCondition.Message } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index 21873d46..8f502e8b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -73,13 +73,20 @@ type CustomResourceDefinitionSpec struct { // `conversion` defines conversion settings for the CRD. Conversion *CustomResourceConversion + + // preserveUnknownFields disables pruning of object fields which are not + // specified in the OpenAPI schema. apiVersion, kind, metadata and known + // fields inside metadata are always preserved. + // Defaults to true in v1beta and will default to false in v1. + PreserveUnknownFields *bool } // CustomResourceConversion describes how to convert different versions of a CR. type CustomResourceConversion struct { // `strategy` specifies the conversion strategy. Allowed values are: // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false. Strategy ConversionStrategyType // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. @@ -132,8 +139,6 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference @@ -156,6 +161,11 @@ type ServiceReference struct { // this service. // +optional Path *string + + // If specified, the port on the service that hosting webhook. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port int32 } // CustomResourceDefinitionVersion describes a version for CRD. @@ -261,13 +271,34 @@ const ( // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in // the group and are therefore accepted. NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. 
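// [Editor's note] Illustrative sketch only, not part of the vendored diff: the new
// PreserveUnknownFields and ServiceReference.Port fields of the internal
// apiextensions types shown above, wired into a webhook-converted CRD spec.
// Group, service and path names are hypothetical.
package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
)

func main() {
	// The Webhook strategy requires spec.preserveUnknownFields to be false,
	// per the conversion comment above.
	preserveUnknown := false
	path := "/convert"

	spec := apiextensions.CustomResourceDefinitionSpec{
		Group: "example.com",
		Names: apiextensions.CustomResourceDefinitionNames{
			Plural: "widgets",
			Kind:   "Widget",
		},
		Scope:                 apiextensions.NamespaceScoped,
		PreserveUnknownFields: &preserveUnknown,
		Versions: []apiextensions.CustomResourceDefinitionVersion{
			{Name: "v1", Served: true, Storage: true},
		},
		Conversion: &apiextensions.CustomResourceConversion{
			Strategy: apiextensions.WebhookConverter,
			WebhookClientConfig: &apiextensions.WebhookClientConfig{
				Service: &apiextensions.ServiceReference{
					Namespace: "default",
					Name:      "widget-conversion",
					Path:      &path,
					Port:      8443, // valid range 1-65535
				},
			},
			ConversionReviewVersions: []string{"v1beta1"},
		},
	}

	fmt.Println(spec.Group, spec.Conversion.Strategy)
}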
+ // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" ) // CustomResourceDefinitionCondition contains details for the current condition of this pod. type CustomResourceDefinitionCondition struct { - // Type is the type of the condition. + // Type is the type of the condition. Types include Established, NamesAccepted and Terminating. Type CustomResourceDefinitionConditionType // Status is the status of the condition. // Can be True, False, Unknown. @@ -368,8 +399,11 @@ type CustomResourceSubresourceScale struct { StatusReplicasPath string // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. + // Must be a JSON Path under .status or .spec. // Must be set to work with HPA. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource // If there is no value under the given path in the CustomResource, the status label selector value in the /scale // subresource will default to the empty string. // +optional diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index af78c34f..44169c76 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -55,6 +55,60 @@ type JSONSchemaProps struct { Definitions JSONSchemaDefinitions ExternalDocs *ExternalDocumentation Example *JSON + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. 
This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + XPreserveUnknownFields *bool + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. Both ObjectMeta and TypeMeta + // are validated automatically. x-kubernetes-preserve-unknown-fields + // must be true. + XEmbeddedResource bool + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + XListMapKeys []string + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar or an array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + XListType *string } // JSON represents any valid JSON value. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go new file mode 100644 index 00000000..70a2265c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go @@ -0,0 +1,212 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
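// [Editor's note] Illustrative sketch only, not part of the vendored diff: the
// x-kubernetes-* schema extensions documented above, set on the internal
// JSONSchemaProps type. The property names ("ports", "name", "config") are
// hypothetical.
package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
)

func main() {
	listTypeMap := "map"
	preserve := true

	schema := apiextensions.JSONSchemaProps{
		Type: "object",
		Properties: map[string]apiextensions.JSONSchemaProps{
			"ports": {
				Type: "array",
				// x-kubernetes-list-type=map with key "name": items are merged
				// by their "name" field instead of the array being replaced.
				XListType:    &listTypeMap,
				XListMapKeys: []string{"name"},
				Items: &apiextensions.JSONSchemaPropsOrArray{
					Schema: &apiextensions.JSONSchemaProps{
						Type: "object",
						Properties: map[string]apiextensions.JSONSchemaProps{
							"name": {Type: "string"},
							"port": {Type: "integer"},
						},
					},
				},
			},
			// x-kubernetes-preserve-unknown-fields: unspecified fields under
			// "config" are kept instead of being pruned.
			"config": {Type: "object", XPreserveUnknownFields: &preserve},
		},
	}

	fmt.Println(len(schema.Properties))
}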
+*/ + +package v1 + +import ( + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps, + Convert_apiextensions_JSON_To_v1_JSON, + Convert_v1_JSON_To_apiextensions_JSON, + ) + if err != nil { + return err + } + return nil +} + +func Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + if err := autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in, out, s); err != nil { + return err + } + if in.Default != nil && *(in.Default) == nil { + out.Default = nil + } + if in.Example != nil && *(in.Example) == nil { + out.Example = nil + } + return nil +} + +func Convert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + raw, err := json.Marshal(*in) + if err != nil { + return err + } + out.Raw = raw + return nil +} + +func Convert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + if in != nil { + var i interface{} + if err := json.Unmarshal(in.Raw, &i); err != nil { + return err + } + *out = i + } else { + out = nil + } + return nil +} + +func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + if err := autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in, out, s); err != nil { + return err + } + + if len(out.Versions) == 0 && len(in.Version) > 0 { + // no versions were specified, and a version name was specified + out.Versions = []CustomResourceDefinitionVersion{{Name: in.Version, Served: true, Storage: true}} + } + + // If spec.{subresources,validation,additionalPrinterColumns} exists, move to versions + if in.Subresources != nil { + subresources := &CustomResourceSubresources{} + if err := Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in.Subresources, subresources, s); err != nil { + return err + } + for i := range out.Versions { + out.Versions[i].Subresources = subresources + } + } + if in.Validation != nil { + schema := &CustomResourceValidation{} + if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in.Validation, schema, s); err != nil { + return err + } + for i := range out.Versions { + out.Versions[i].Schema = schema + } + } + if in.AdditionalPrinterColumns != nil { + additionalPrinterColumns := make([]CustomResourceColumnDefinition, len(in.AdditionalPrinterColumns)) + for i := range in.AdditionalPrinterColumns { + if err := Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(&in.AdditionalPrinterColumns[i], &additionalPrinterColumns[i], s); err != nil { + return err + } + } + for i := range out.Versions { + out.Versions[i].AdditionalPrinterColumns = additionalPrinterColumns + } + } + return nil +} + +func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + if err := 
autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s); err != nil { + return nil + } + + if len(out.Versions) == 0 { + return nil + } + + // Copy versions[0] to version + out.Version = out.Versions[0].Name + + // If versions[*].{subresources,schema,additionalPrinterColumns} are identical, move to spec + subresources := out.Versions[0].Subresources + subresourcesIdentical := true + validation := out.Versions[0].Schema + validationIdentical := true + additionalPrinterColumns := out.Versions[0].AdditionalPrinterColumns + additionalPrinterColumnsIdentical := true + + // Detect if per-version fields are identical + for _, v := range out.Versions { + if subresourcesIdentical && !apiequality.Semantic.DeepEqual(v.Subresources, subresources) { + subresourcesIdentical = false + } + if validationIdentical && !apiequality.Semantic.DeepEqual(v.Schema, validation) { + validationIdentical = false + } + if additionalPrinterColumnsIdentical && !apiequality.Semantic.DeepEqual(v.AdditionalPrinterColumns, additionalPrinterColumns) { + additionalPrinterColumnsIdentical = false + } + } + + // If they are, set the top-level fields and clear the per-version fields + if subresourcesIdentical { + out.Subresources = subresources + } + if validationIdentical { + out.Validation = validation + } + if additionalPrinterColumnsIdentical { + out.AdditionalPrinterColumns = additionalPrinterColumns + } + for i := range out.Versions { + if subresourcesIdentical { + out.Versions[i].Subresources = nil + } + if validationIdentical { + out.Versions[i].Schema = nil + } + if additionalPrinterColumnsIdentical { + out.Versions[i].AdditionalPrinterColumns = nil + } + } + + return nil +} + +func Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + if err := autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s); err != nil { + return err + } + + out.WebhookClientConfig = nil + out.ConversionReviewVersions = nil + if in.Webhook != nil { + out.ConversionReviewVersions = in.Webhook.ConversionReviewVersions + if in.Webhook.ClientConfig != nil { + out.WebhookClientConfig = &apiextensions.WebhookClientConfig{} + if err := Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in.Webhook.ClientConfig, out.WebhookClientConfig, s); err != nil { + return err + } + } + } + return nil +} + +func Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + if err := autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in, out, s); err != nil { + return err + } + + out.Webhook = nil + if in.WebhookClientConfig != nil || in.ConversionReviewVersions != nil { + out.Webhook = &WebhookConversion{} + out.Webhook.ConversionReviewVersions = in.ConversionReviewVersions + if in.WebhookClientConfig != nil { + out.Webhook.ClientConfig = &WebhookClientConfig{} + if err := Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in.WebhookClientConfig, out.Webhook.ClientConfig, s); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go new file mode 100644 index 00000000..b8c44c69 --- /dev/null +++ 
b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go @@ -0,0 +1,248 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// TODO: Update this after a tag is created for interface fields in DeepCopy +func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { + if in == nil { + return nil + } + out := new(JSONSchemaProps) + *out = *in + + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.MaxItems != nil { + in, out := &in.MaxItems, &out.MaxItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinItems != nil { + in, out := &in.MinItems, &out.MinItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MultipleOf != nil { + in, out := &in.MultipleOf, &out.MultipleOf + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxProperties != nil { + in, out := &in.MaxProperties, &out.MaxProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinProperties != nil { + in, out := &in.MinProperties, &out.MinProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.Items != nil { + in, out := &in.Items, &out.Items + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrArray) + (*in).DeepCopyInto(*out) + } + } + + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.Not != nil { + in, out := &in.Not, &out.Not + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaProps) + (*in).DeepCopyInto(*out) + } + } + + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = 
make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.ExternalDocs != nil { + in, out := &in.ExternalDocs, &out.ExternalDocs + if *in == nil { + *out = nil + } else { + *out = new(ExternalDocumentation) + (*in).DeepCopyInto(*out) + } + } + + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go new file mode 100644 index 00000000..5cebec92 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) + if len(obj.Status.StoredVersions) == 0 { + for _, v := range obj.Spec.Versions { + if v.Storage { + obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) + break + } + } + } +} + +func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { + if len(obj.Names.Singular) == 0 { + obj.Names.Singular = strings.ToLower(obj.Names.Kind) + } + if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { + obj.Names.ListKind = obj.Names.Kind + "List" + } + if obj.Conversion == nil { + obj.Conversion = &CustomResourceConversion{ + Strategy: NoneConverter, + } + } +} + +// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference +func SetDefaults_ServiceReference(obj *ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go new file mode 100644 index 00000000..09d4872f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +groupName=apiextensions.k8s.io + +// Package v1 is the v1 version of the API. +package v1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go new file mode 100644 index 00000000..1eba9e37 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -0,0 +1,8966 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
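// [Editor's note] Illustrative sketch only, not part of the vendored diff: what the
// new v1 defaulting helpers above fill in for a minimal CustomResourceDefinition.
// They are normally registered on the scheme via RegisterDefaults; calling them
// directly here is only for illustration. Group and kind names are hypothetical.
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

func main() {
	crd := apiextensionsv1.CustomResourceDefinition{
		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
			Group: "example.com",
			Names: apiextensionsv1.CustomResourceDefinitionNames{
				Plural: "widgets",
				Kind:   "Widget",
			},
			Scope: apiextensionsv1.NamespaceScoped,
			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
				{Name: "v1", Served: true, Storage: true},
			},
		},
	}

	apiextensionsv1.SetDefaults_CustomResourceDefinition(&crd)

	fmt.Println(crd.Spec.Names.Singular)      // "widget"     (lower-cased Kind)
	fmt.Println(crd.Spec.Names.ListKind)      // "WidgetList" (Kind + "List")
	fmt.Println(crd.Spec.Conversion.Strategy) // "None"
	fmt.Println(crd.Status.StoredVersions)    // [v1]         (the storage version)
}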
+// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo + +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ConversionReview proto.InternalMessageInfo + +func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } +func (*CustomResourceColumnDefinition) ProtoMessage() {} +func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo + +func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } +func (*CustomResourceConversion) ProtoMessage() {} +func (*CustomResourceConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo + +func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } +func (*CustomResourceDefinition) ProtoMessage() {} +func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } +func (*CustomResourceDefinitionCondition) ProtoMessage() {} +func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } +func (*CustomResourceDefinitionList) ProtoMessage() {} +func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + +func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } +func (*CustomResourceDefinitionNames) ProtoMessage() {} +func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo + +func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } +func (*CustomResourceDefinitionSpec) ProtoMessage() {} +func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionSpec 
proto.InternalMessageInfo + +func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } +func (*CustomResourceDefinitionStatus) ProtoMessage() {} +func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo + +func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } +func (*CustomResourceDefinitionVersion) ProtoMessage() {} +func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo + +func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } +func (*CustomResourceSubresourceScale) ProtoMessage() {} +func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo + +func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } +func (*CustomResourceSubresourceStatus) ProtoMessage() {} +func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{13} +} +func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo + +func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } +func (*CustomResourceSubresources) ProtoMessage() {} +func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo + +func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } +func (*CustomResourceValidation) ProtoMessage() {} +func (*CustomResourceValidation) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m 
= JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } +func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} 
+func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{23} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookConversion) Reset() { *m = WebhookConversion{} } +func (*WebhookConversion) ProtoMessage() {} +func (*WebhookConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{24} +} +func (m *WebhookConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookConversion.Merge(m, src) +} +func (m *WebhookConversion) XXX_Size() int { + return m.Size() +} +func (m *WebhookConversion) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookConversion proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionRequest") + proto.RegisterType((*ConversionResponse)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionResponse") + proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionReview") + proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition") + proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion") + proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition") + proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition") + proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionList") + proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames") + proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec") + proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus") + proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion") + proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale") + proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus") + proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources") + proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation") + proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation") + proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSON") + proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PropertiesEntry") + proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray") + proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool") + 
proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ServiceReference") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig") + proto.RegisterType((*WebhookConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookConversion") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto", fileDescriptor_f5a35c9667703937) +} + +var fileDescriptor_f5a35c9667703937 = []byte{ + // 2919 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0xdf, 0x1e, 0x7f, 0x8d, 0xcb, 0xf6, 0xda, 0xae, 0x5d, 0x9b, 0x5e, 0x67, 0xd7, 0xe3, 0x9d, + 0x90, 0xe0, 0x84, 0xcd, 0x38, 0xbb, 0x24, 0x24, 0xe4, 0x00, 0xf2, 0xd8, 0x4e, 0x70, 0xd6, 0x5e, + 0x5b, 0x35, 0xbb, 0x1b, 0x27, 0x41, 0x4a, 0xca, 0xdd, 0xe5, 0x71, 0xc7, 0xfd, 0xb5, 0x5d, 0xdd, + 0x63, 0x5b, 0x02, 0x29, 0x02, 0x45, 0x40, 0x24, 0x08, 0x07, 0x04, 0xe2, 0x80, 0x10, 0x42, 0x39, + 0xc0, 0x01, 0x6e, 0xf0, 0x2f, 0xe4, 0x82, 0x94, 0x03, 0x42, 0x91, 0x90, 0x46, 0x64, 0xf8, 0x13, + 0x00, 0x21, 0x7c, 0x40, 0xa8, 0x3e, 0xba, 0xba, 0xa6, 0x67, 0x66, 0x77, 0xb5, 0x1e, 0x27, 0xb7, + 0x99, 0xf7, 0xf5, 0x7b, 0xf5, 0xea, 0xd5, 0xab, 0xf7, 0x6a, 0x06, 0xe0, 0x83, 0x17, 0x69, 0xc5, + 0x09, 0x96, 0x0e, 0x92, 0x5d, 0x12, 0xf9, 0x24, 0x26, 0x74, 0xa9, 0x41, 0x7c, 0x3b, 0x88, 0x96, + 0x24, 0x03, 0x87, 0x0e, 0x39, 0x8a, 0x89, 0x4f, 0x9d, 0xc0, 0xa7, 0xcf, 0xe0, 0xd0, 0xa1, 0x24, + 0x6a, 0x90, 0x68, 0x29, 0x3c, 0xa8, 0x33, 0x1e, 0x6d, 0x17, 0x58, 0x6a, 0x5c, 0x5f, 0xaa, 0x13, + 0x9f, 0x44, 0x38, 0x26, 0x76, 0x25, 0x8c, 0x82, 0x38, 0x80, 0x2f, 0x0a, 0x4b, 0x95, 0x36, 0xc1, + 0xb7, 0x94, 0xa5, 0x4a, 0x78, 0x50, 0x67, 0x3c, 0xda, 0x2e, 0x50, 0x69, 0x5c, 0x9f, 0x7b, 0xa6, + 0xee, 0xc4, 0xfb, 0xc9, 0x6e, 0xc5, 0x0a, 0xbc, 0xa5, 0x7a, 0x50, 0x0f, 0x96, 0xb8, 0xc1, 0xdd, + 0x64, 0x8f, 0x7f, 0xe3, 0x5f, 0xf8, 0x27, 0x01, 0x34, 0xf7, 0x5c, 0xe6, 0xb2, 0x87, 0xad, 0x7d, + 0xc7, 0x27, 0xd1, 0x71, 0xe6, 0xa7, 0x47, 0x62, 0xdc, 0xc5, 0xbd, 0xb9, 0xa5, 0x5e, 0x5a, 0x51, + 0xe2, 0xc7, 0x8e, 0x47, 0x3a, 0x14, 0xbe, 0xfa, 0x20, 0x05, 0x6a, 0xed, 0x13, 0x0f, 0xe7, 0xf5, + 0xca, 0x27, 0x06, 0x98, 0x5e, 0x09, 0xfc, 0x06, 0x89, 0xd8, 0x02, 0x11, 0xb9, 0x97, 0x10, 0x1a, + 0xc3, 0x2a, 0x18, 0x48, 0x1c, 0xdb, 0x34, 0x16, 0x8c, 0xc5, 0xd1, 0xea, 0xb3, 0x1f, 0x35, 0x4b, + 0xe7, 0x5a, 0xcd, 0xd2, 0xc0, 0x9d, 0xf5, 0xd5, 0x93, 0x66, 0xe9, 0x6a, 0x2f, 0xa4, 0xf8, 0x38, + 0x24, 0xb4, 0x72, 0x67, 0x7d, 0x15, 0x31, 0x65, 0xf8, 0x0a, 0x98, 0xb6, 0x09, 0x75, 0x22, 0x62, + 0x2f, 0x6f, 0xaf, 0xdf, 0x15, 0xf6, 0xcd, 0x02, 0xb7, 0x78, 0x49, 0x5a, 0x9c, 0x5e, 0xcd, 0x0b, + 0xa0, 0x4e, 0x1d, 0xb8, 0x03, 0x46, 0x82, 0xdd, 0x77, 0x88, 0x15, 0x53, 0x73, 0x60, 0x61, 0x60, + 0x71, 0xec, 0xc6, 0x33, 0x95, 0x6c, 0xf3, 0x94, 0x0b, 0x7c, 0xc7, 0xe4, 0x62, 0x2b, 0x08, 0x1f, + 0xae, 0xa5, 0x9b, 0x56, 0x9d, 0x94, 0x68, 0x23, 0x5b, 0xc2, 0x0a, 0x4a, 0xcd, 0x95, 0x7f, 0x53, + 0x00, 0x50, 0x5f, 0x3c, 0x0d, 0x03, 0x9f, 0x92, 0xbe, 0xac, 0x9e, 0x82, 0x29, 0x8b, 0x5b, 0x8e, + 0x89, 0x2d, 0x71, 0xcd, 0xc2, 0xa3, 0x78, 0x6f, 0x4a, 0xfc, 0xa9, 0x95, 0x9c, 0x39, 0xd4, 0x01, + 0x00, 0x6f, 0x83, 0xe1, 0x88, 0xd0, 0xc4, 0x8d, 0xcd, 0x81, 0x05, 0x63, 0x71, 0xec, 0xc6, 0xb5, + 0x9e, 
0x50, 0x3c, 0xb5, 0x59, 0xf2, 0x55, 0x1a, 0xd7, 0x2b, 0xb5, 0x18, 0xc7, 0x09, 0xad, 0x9e, + 0x97, 0x48, 0xc3, 0x88, 0xdb, 0x40, 0xd2, 0x56, 0xf9, 0x7f, 0x06, 0x98, 0xd2, 0xa3, 0xd4, 0x70, + 0xc8, 0x21, 0x8c, 0xc0, 0x48, 0x24, 0x92, 0x85, 0xc7, 0x69, 0xec, 0xc6, 0xcd, 0xca, 0xa3, 0x9e, + 0xa8, 0x4a, 0x47, 0xfe, 0x55, 0xc7, 0xd8, 0x76, 0xc9, 0x2f, 0x28, 0x05, 0x82, 0x0d, 0x50, 0x8c, + 0xe4, 0x1e, 0xf1, 0x44, 0x1a, 0xbb, 0xb1, 0xd1, 0x1f, 0x50, 0x61, 0xb3, 0x3a, 0xde, 0x6a, 0x96, + 0x8a, 0xe9, 0x37, 0xa4, 0xb0, 0xca, 0xbf, 0x2a, 0x80, 0xf9, 0x95, 0x84, 0xc6, 0x81, 0x87, 0x08, + 0x0d, 0x92, 0xc8, 0x22, 0x2b, 0x81, 0x9b, 0x78, 0xfe, 0x2a, 0xd9, 0x73, 0x7c, 0x27, 0x66, 0x39, + 0xba, 0x00, 0x06, 0x7d, 0xec, 0x11, 0x99, 0x33, 0xe3, 0x32, 0x92, 0x83, 0xb7, 0xb0, 0x47, 0x10, + 0xe7, 0x30, 0x09, 0x96, 0x22, 0xf2, 0x04, 0x28, 0x89, 0xdb, 0xc7, 0x21, 0x41, 0x9c, 0x03, 0x9f, + 0x04, 0xc3, 0x7b, 0x41, 0xe4, 0x61, 0xb1, 0x7b, 0xa3, 0xd9, 0x7e, 0xbc, 0xcc, 0xa9, 0x48, 0x72, + 0xe1, 0xf3, 0x60, 0xcc, 0x26, 0xd4, 0x8a, 0x9c, 0x90, 0x41, 0x9b, 0x83, 0x5c, 0xf8, 0x82, 0x14, + 0x1e, 0x5b, 0xcd, 0x58, 0x48, 0x97, 0x83, 0xd7, 0x40, 0x31, 0x8c, 0x9c, 0x20, 0x72, 0xe2, 0x63, + 0x73, 0x68, 0xc1, 0x58, 0x1c, 0xaa, 0x4e, 0x49, 0x9d, 0xe2, 0xb6, 0xa4, 0x23, 0x25, 0xc1, 0xa4, + 0xdf, 0xa1, 0x81, 0xbf, 0x8d, 0xe3, 0x7d, 0x73, 0x98, 0x23, 0x28, 0xe9, 0x57, 0x6b, 0x5b, 0xb7, + 0x18, 0x1d, 0x29, 0x89, 0xf2, 0x5f, 0x0d, 0x60, 0xe6, 0x23, 0x94, 0x86, 0x17, 0xbe, 0x0c, 0x8a, + 0x34, 0x66, 0x35, 0xa7, 0x7e, 0x2c, 0xe3, 0xf3, 0x74, 0x6a, 0xaa, 0x26, 0xe9, 0x27, 0xcd, 0xd2, + 0x6c, 0xa6, 0x91, 0x52, 0x79, 0x6c, 0x94, 0x2e, 0x4b, 0xb9, 0x43, 0xb2, 0xbb, 0x1f, 0x04, 0x07, + 0x72, 0xf7, 0x4f, 0x91, 0x72, 0xaf, 0x09, 0x43, 0x19, 0xa6, 0x48, 0x39, 0x49, 0x46, 0x29, 0x50, + 0xf9, 0xbf, 0x85, 0xfc, 0xc2, 0xb4, 0x4d, 0x7f, 0x1b, 0x14, 0xd9, 0x11, 0xb2, 0x71, 0x8c, 0xe5, + 0x21, 0x78, 0xf6, 0xe1, 0x0e, 0x9c, 0x38, 0xaf, 0x9b, 0x24, 0xc6, 0x55, 0x28, 0x43, 0x01, 0x32, + 0x1a, 0x52, 0x56, 0xe1, 0x11, 0x18, 0xa4, 0x21, 0xb1, 0xe4, 0x7a, 0xef, 0x9e, 0x22, 0xdb, 0x7b, + 0xac, 0xa1, 0x16, 0x12, 0x2b, 0x4b, 0x46, 0xf6, 0x0d, 0x71, 0x44, 0xf8, 0xae, 0x01, 0x86, 0x29, + 0xaf, 0x0b, 0xb2, 0x96, 0xec, 0x9c, 0x01, 0x78, 0xae, 0xee, 0x88, 0xef, 0x48, 0xe2, 0x96, 0xff, + 0x55, 0x00, 0x57, 0x7b, 0xa9, 0xae, 0x04, 0xbe, 0x2d, 0x36, 0x61, 0x5d, 0x9e, 0x2b, 0x91, 0x59, + 0xcf, 0xeb, 0xe7, 0xea, 0xa4, 0x59, 0x7a, 0xe2, 0x81, 0x06, 0xb4, 0x03, 0xf8, 0x35, 0xb5, 0x64, + 0x71, 0x48, 0xaf, 0xb6, 0x3b, 0x76, 0xd2, 0x2c, 0x4d, 0x2a, 0xb5, 0x76, 0x5f, 0x61, 0x03, 0x40, + 0x17, 0xd3, 0xf8, 0x76, 0x84, 0x7d, 0x2a, 0xcc, 0x3a, 0x1e, 0x91, 0x91, 0x7b, 0xfa, 0xe1, 0x92, + 0x82, 0x69, 0x54, 0xe7, 0x24, 0x24, 0xdc, 0xe8, 0xb0, 0x86, 0xba, 0x20, 0xb0, 0x9a, 0x11, 0x11, + 0x4c, 0x55, 0x19, 0xd0, 0x6a, 0x38, 0xa3, 0x22, 0xc9, 0x85, 0x4f, 0x81, 0x11, 0x8f, 0x50, 0x8a, + 0xeb, 0x84, 0x9f, 0xfd, 0xd1, 0xec, 0x52, 0xdc, 0x14, 0x64, 0x94, 0xf2, 0xcb, 0xff, 0x36, 0xc0, + 0xe5, 0x5e, 0x51, 0xdb, 0x70, 0x68, 0x0c, 0xbf, 0xd5, 0x91, 0xf6, 0x95, 0x87, 0x5b, 0x21, 0xd3, + 0xe6, 0x49, 0xaf, 0x4a, 0x49, 0x4a, 0xd1, 0x52, 0xfe, 0x10, 0x0c, 0x39, 0x31, 0xf1, 0xd2, 0xdb, + 0x12, 0xf5, 0x3f, 0xed, 0xaa, 0x13, 0x12, 0x7e, 0x68, 0x9d, 0x01, 0x21, 0x81, 0x57, 0xfe, 0xb0, + 0x00, 0xae, 0xf4, 0x52, 0x61, 0x75, 0x9c, 0xb2, 0x60, 0x87, 0x6e, 0x12, 0x61, 0x57, 0x26, 0x9b, + 0x0a, 0xf6, 0x36, 0xa7, 0x22, 0xc9, 0x65, 0xb5, 0x93, 0x3a, 0x7e, 0x3d, 0x71, 0x71, 0x24, 0x33, + 0x49, 0x2d, 0xb8, 0x26, 0xe9, 0x48, 0x49, 0xc0, 0x0a, 0x00, 0x74, 0x3f, 0x88, 0x62, 0x8e, 0xc1, + 0x3b, 0x9c, 0xd1, 0xea, 0x79, 
0x56, 0x11, 0x6a, 0x8a, 0x8a, 0x34, 0x09, 0x76, 0x91, 0x1c, 0x38, + 0xbe, 0x2d, 0x37, 0x5c, 0x9d, 0xdd, 0x9b, 0x8e, 0x6f, 0x23, 0xce, 0x61, 0xf8, 0xae, 0x43, 0x63, + 0x46, 0x91, 0xbb, 0xdd, 0x16, 0x70, 0x2e, 0xa9, 0x24, 0x18, 0xbe, 0xc5, 0x0a, 0x6c, 0x10, 0x39, + 0x84, 0x9a, 0xc3, 0x19, 0xfe, 0x8a, 0xa2, 0x22, 0x4d, 0xa2, 0xfc, 0xb7, 0xc1, 0xde, 0xf9, 0xc1, + 0x0a, 0x08, 0x7c, 0x1c, 0x0c, 0xd5, 0xa3, 0x20, 0x09, 0x65, 0x94, 0x54, 0xb4, 0x5f, 0x61, 0x44, + 0x24, 0x78, 0xf0, 0xdb, 0x60, 0xc8, 0x97, 0x0b, 0x66, 0x19, 0xf4, 0x5a, 0xff, 0xb7, 0x99, 0x47, + 0x2b, 0x43, 0x17, 0x81, 0x14, 0xa0, 0xf0, 0x39, 0x30, 0x44, 0xad, 0x20, 0x24, 0x32, 0x88, 0xf3, + 0xa9, 0x50, 0x8d, 0x11, 0x4f, 0x9a, 0xa5, 0x89, 0xd4, 0x1c, 0x27, 0x20, 0x21, 0x0c, 0xbf, 0x6f, + 0x80, 0xa2, 0xbc, 0x2e, 0xa8, 0x39, 0xc2, 0xd3, 0xf3, 0xf5, 0xfe, 0xfb, 0x2d, 0xdb, 0xde, 0x6c, + 0xcf, 0x24, 0x81, 0x22, 0x05, 0x0e, 0xbf, 0x6b, 0x00, 0x60, 0xa9, 0xbb, 0xcb, 0x1c, 0xe5, 0x31, + 0xec, 0xdb, 0x51, 0xd1, 0x6e, 0x45, 0x91, 0x08, 0x59, 0xab, 0xa4, 0xa1, 0xc2, 0x1a, 0x98, 0x09, + 0x23, 0xc2, 0x6d, 0xdf, 0xf1, 0x0f, 0xfc, 0xe0, 0xd0, 0x7f, 0xd9, 0x21, 0xae, 0x4d, 0x4d, 0xb0, + 0x60, 0x2c, 0x16, 0xab, 0x57, 0xa4, 0xff, 0x33, 0xdb, 0xdd, 0x84, 0x50, 0x77, 0xdd, 0xf2, 0x7b, + 0x03, 0xf9, 0x5e, 0x2b, 0x7f, 0x5f, 0xc0, 0x0f, 0xc4, 0xe2, 0x45, 0x1d, 0xa6, 0xa6, 0xc1, 0x37, + 0xe2, 0xcd, 0xfe, 0x6f, 0x84, 0xaa, 0xf5, 0xd9, 0x25, 0xad, 0x48, 0x14, 0x69, 0x2e, 0xc0, 0x9f, + 0x1a, 0x60, 0x02, 0x5b, 0x16, 0x09, 0x63, 0x62, 0x8b, 0x63, 0x5c, 0x38, 0xdb, 0xac, 0x9e, 0x91, + 0x0e, 0x4d, 0x2c, 0xeb, 0xa8, 0xa8, 0xdd, 0x09, 0xf8, 0x12, 0x38, 0x4f, 0xe3, 0x20, 0x22, 0x76, + 0x9a, 0x41, 0xb2, 0xba, 0xc0, 0x56, 0xb3, 0x74, 0xbe, 0xd6, 0xc6, 0x41, 0x39, 0xc9, 0xf2, 0x5f, + 0x06, 0x41, 0xe9, 0x01, 0x19, 0xfa, 0x10, 0x4d, 0xef, 0x93, 0x60, 0x98, 0xaf, 0xd4, 0xe6, 0x01, + 0x29, 0x6a, 0x57, 0x3d, 0xa7, 0x22, 0xc9, 0x65, 0xd7, 0x13, 0xc3, 0x67, 0xd7, 0xd3, 0x00, 0x17, + 0x54, 0xd7, 0x53, 0x4d, 0x90, 0x51, 0xca, 0x87, 0x0d, 0x30, 0x2c, 0x46, 0x59, 0x7e, 0x76, 0xfb, + 0x98, 0xf5, 0x77, 0xb1, 0xeb, 0xd8, 0x98, 0xef, 0x37, 0xe0, 0x2e, 0x72, 0x14, 0x24, 0xd1, 0xe0, + 0xfb, 0x06, 0x18, 0xa7, 0xc9, 0x6e, 0x24, 0xa5, 0x29, 0xaf, 0xac, 0x63, 0x37, 0x6e, 0xf7, 0x0b, + 0xbe, 0xa6, 0xd9, 0xae, 0x4e, 0xb5, 0x9a, 0xa5, 0x71, 0x9d, 0x82, 0xda, 0xb0, 0xe1, 0x1f, 0x0d, + 0x60, 0x62, 0x5b, 0xa4, 0x1f, 0x76, 0xb7, 0x23, 0xc7, 0x8f, 0x49, 0x24, 0x86, 0x12, 0x51, 0xc2, + 0xfb, 0xd8, 0xaf, 0xe5, 0x67, 0x9d, 0xea, 0x82, 0xdc, 0x1b, 0x73, 0xb9, 0x87, 0x07, 0xa8, 0xa7, + 0x6f, 0xe5, 0xff, 0x18, 0xf9, 0xe3, 0xad, 0xad, 0xb2, 0x66, 0x61, 0x97, 0xc0, 0x55, 0x30, 0xc5, + 0x3a, 0x50, 0x44, 0x42, 0xd7, 0xb1, 0x30, 0xe5, 0x13, 0x88, 0xc8, 0x30, 0x35, 0x0a, 0xd7, 0x72, + 0x7c, 0xd4, 0xa1, 0x01, 0x5f, 0x05, 0x50, 0xb4, 0x66, 0x6d, 0x76, 0xc4, 0x6d, 0xac, 0x9a, 0xac, + 0x5a, 0x87, 0x04, 0xea, 0xa2, 0x05, 0x57, 0xc0, 0xb4, 0x8b, 0x77, 0x89, 0x5b, 0x23, 0x2e, 0xb1, + 0xe2, 0x20, 0xe2, 0xa6, 0xc4, 0x8c, 0x36, 0xd3, 0x6a, 0x96, 0xa6, 0x37, 0xf2, 0x4c, 0xd4, 0x29, + 0x5f, 0xbe, 0x9a, 0x3f, 0x4f, 0xfa, 0xc2, 0x45, 0xc3, 0xfb, 0xb3, 0x02, 0x98, 0xeb, 0x9d, 0x14, + 0xf0, 0x3b, 0xaa, 0x3d, 0x15, 0x5d, 0xd7, 0xeb, 0x67, 0x90, 0x7a, 0xb2, 0x25, 0x07, 0x9d, 0xed, + 0x38, 0x3c, 0x66, 0x77, 0x26, 0x76, 0xd3, 0xd1, 0x7b, 0xe7, 0x2c, 0xd0, 0x99, 0xfd, 0xea, 0xa8, + 0xb8, 0x89, 0xb1, 0xcb, 0x2f, 0x5e, 0xec, 0x92, 0xf2, 0x87, 0x1d, 0xe3, 0x65, 0x76, 0x58, 0xe1, + 0x0f, 0x0c, 0x30, 0x19, 0x84, 0xc4, 0x5f, 0xde, 0x5e, 0xbf, 0xfb, 0x15, 0x71, 0x68, 0x65, 0x80, + 0xd6, 0x1f, 0xdd, 0x45, 0x36, 0xe3, 0x0a, 0x5b, 0xdb, 
0x51, 0x10, 0xd2, 0xea, 0x85, 0x56, 0xb3, + 0x34, 0xb9, 0xd5, 0x8e, 0x82, 0xf2, 0xb0, 0x65, 0x0f, 0xcc, 0xac, 0x1d, 0xc5, 0x24, 0xf2, 0xb1, + 0xbb, 0x1a, 0x58, 0x89, 0x47, 0xfc, 0x58, 0xf8, 0x98, 0x1b, 0xd9, 0x8d, 0x87, 0x1c, 0xd9, 0xaf, + 0x80, 0x81, 0x24, 0x72, 0x65, 0xd6, 0x8e, 0xa9, 0x87, 0x28, 0xb4, 0x81, 0x18, 0xbd, 0x7c, 0x15, + 0x0c, 0x32, 0x3f, 0xe1, 0x25, 0x30, 0x10, 0xe1, 0x43, 0x6e, 0x75, 0xbc, 0x3a, 0xc2, 0x44, 0x10, + 0x3e, 0x44, 0x8c, 0x56, 0xfe, 0x45, 0x09, 0x4c, 0xe6, 0xd6, 0x02, 0xe7, 0x40, 0x41, 0xbd, 0x6e, + 0x01, 0x69, 0xb4, 0xb0, 0xbe, 0x8a, 0x0a, 0x8e, 0x0d, 0x5f, 0x50, 0xd5, 0x55, 0x80, 0x96, 0x54, + 0xc1, 0xe6, 0x54, 0xd6, 0x1a, 0x65, 0xe6, 0x98, 0x23, 0x69, 0x79, 0x64, 0x3e, 0x90, 0x3d, 0x79, + 0x2a, 0x84, 0x0f, 0x64, 0x0f, 0x31, 0xda, 0xa3, 0xbe, 0x57, 0xa4, 0x0f, 0x26, 0x43, 0x0f, 0xf1, + 0x60, 0x32, 0x7c, 0xdf, 0x07, 0x93, 0xc7, 0xc1, 0x50, 0xec, 0xc4, 0x2e, 0x31, 0x47, 0xda, 0x1b, + 0xd2, 0xdb, 0x8c, 0x88, 0x04, 0x0f, 0x12, 0x30, 0x62, 0x93, 0x3d, 0x9c, 0xb8, 0xb1, 0x59, 0xe4, + 0xd9, 0xf3, 0xf5, 0xd3, 0x65, 0x8f, 0x78, 0x50, 0x58, 0x15, 0x26, 0x51, 0x6a, 0x1b, 0x3e, 0x01, + 0x46, 0x3c, 0x7c, 0xe4, 0x78, 0x89, 0xc7, 0xbb, 0x36, 0x43, 0x88, 0x6d, 0x0a, 0x12, 0x4a, 0x79, + 0xac, 0x08, 0x92, 0x23, 0xcb, 0x4d, 0xa8, 0xd3, 0x20, 0x92, 0x29, 0xdb, 0x2a, 0x55, 0x04, 0xd7, + 0x72, 0x7c, 0xd4, 0xa1, 0xc1, 0xc1, 0x1c, 0x9f, 0x2b, 0x8f, 0x69, 0x60, 0x82, 0x84, 0x52, 0x5e, + 0x3b, 0x98, 0x94, 0x1f, 0xef, 0x05, 0x26, 0x95, 0x3b, 0x34, 0xe0, 0x97, 0xc1, 0xa8, 0x87, 0x8f, + 0x36, 0x88, 0x5f, 0x8f, 0xf7, 0xcd, 0x89, 0x05, 0x63, 0x71, 0xa0, 0x3a, 0xd1, 0x6a, 0x96, 0x46, + 0x37, 0x53, 0x22, 0xca, 0xf8, 0x5c, 0xd8, 0xf1, 0xa5, 0xf0, 0x79, 0x4d, 0x38, 0x25, 0xa2, 0x8c, + 0xcf, 0xba, 0x83, 0x10, 0xc7, 0xec, 0x5c, 0x99, 0x93, 0xed, 0xc3, 0xeb, 0xb6, 0x20, 0xa3, 0x94, + 0x0f, 0x17, 0x41, 0xd1, 0xc3, 0x47, 0x7c, 0xae, 0x33, 0xa7, 0xb8, 0x59, 0xfe, 0xa8, 0xb7, 0x29, + 0x69, 0x48, 0x71, 0xb9, 0xa4, 0xe3, 0x0b, 0xc9, 0x69, 0x4d, 0x52, 0xd2, 0x90, 0xe2, 0xb2, 0xfc, + 0x4d, 0x7c, 0xe7, 0x5e, 0x42, 0x84, 0x30, 0xe4, 0x91, 0x51, 0xf9, 0x7b, 0x27, 0x63, 0x21, 0x5d, + 0x8e, 0xcd, 0x55, 0x5e, 0xe2, 0xc6, 0x4e, 0xe8, 0x92, 0xad, 0x3d, 0xf3, 0x02, 0x8f, 0x3f, 0x6f, + 0xa7, 0x37, 0x15, 0x15, 0x69, 0x12, 0xf0, 0x6d, 0x30, 0x48, 0xfc, 0xc4, 0x33, 0x2f, 0xf2, 0xeb, + 0xfb, 0xb4, 0xd9, 0xa7, 0xce, 0xcb, 0x9a, 0x9f, 0x78, 0x88, 0x5b, 0x86, 0x2f, 0x80, 0x09, 0x0f, + 0x1f, 0xb1, 0x22, 0x40, 0xa2, 0x98, 0x0d, 0x7b, 0x33, 0x7c, 0xdd, 0xd3, 0xac, 0x91, 0xdc, 0xd4, + 0x19, 0xa8, 0x5d, 0x8e, 0x2b, 0x3a, 0xbe, 0xa6, 0x38, 0xab, 0x29, 0xea, 0x0c, 0xd4, 0x2e, 0xc7, + 0x82, 0x1c, 0x91, 0x7b, 0x89, 0x13, 0x11, 0xdb, 0xfc, 0x02, 0xef, 0x3d, 0xe5, 0x1b, 0xab, 0xa0, + 0x21, 0xc5, 0x85, 0xf7, 0xd2, 0xb1, 0xdf, 0xe4, 0x87, 0x6f, 0xbb, 0x6f, 0xa5, 0x7b, 0x2b, 0x5a, + 0x8e, 0x22, 0x7c, 0x2c, 0x6e, 0x15, 0x7d, 0xe0, 0x87, 0x3e, 0x18, 0xc2, 0xae, 0xbb, 0xb5, 0x67, + 0x5e, 0xe2, 0x11, 0xef, 0xe3, 0x6d, 0xa1, 0x2a, 0xcc, 0x32, 0xb3, 0x8f, 0x04, 0x0c, 0xc3, 0x0b, + 0x7c, 0x96, 0x0b, 0x73, 0x67, 0x86, 0xb7, 0xc5, 0xec, 0x23, 0x01, 0xc3, 0xd7, 0xe7, 0x1f, 0x6f, + 0xed, 0x99, 0x8f, 0x9d, 0xdd, 0xfa, 0x98, 0x7d, 0x24, 0x60, 0xa0, 0x0d, 0x06, 0xfc, 0x20, 0x36, + 0x2f, 0xf7, 0xfb, 0xee, 0xe5, 0xb7, 0xc9, 0xad, 0x20, 0x46, 0xcc, 0x3c, 0xfc, 0x91, 0x01, 0x40, + 0x98, 0x65, 0xe2, 0x95, 0xd3, 0x8e, 0xe1, 0x39, 0xb4, 0x4a, 0x96, 0xbd, 0x6b, 0x7e, 0x1c, 0x1d, + 0x67, 0xb3, 0x9f, 0x96, 0xe5, 0x9a, 0x03, 0xf0, 0x97, 0x06, 0xb8, 0xa8, 0xb7, 0xbb, 0xca, 0xb3, + 0x79, 0x1e, 0x87, 0xad, 0x3e, 0x26, 0x72, 0x35, 0x08, 0xdc, 0xaa, 0xd9, 0x6a, 
0x96, 0x2e, 0x2e, + 0x77, 0x01, 0x44, 0x5d, 0xdd, 0x80, 0xbf, 0x35, 0xc0, 0xb4, 0xac, 0x8e, 0x9a, 0x73, 0x25, 0x1e, + 0xb6, 0xb7, 0xfb, 0x18, 0xb6, 0x3c, 0x84, 0x88, 0x9e, 0xfa, 0xa5, 0xaf, 0x83, 0x8f, 0x3a, 0xbd, + 0x82, 0x7f, 0x30, 0xc0, 0xb8, 0x4d, 0x42, 0xe2, 0xdb, 0xc4, 0xb7, 0x98, 0x9b, 0x0b, 0xa7, 0x9d, + 0xed, 0xf3, 0x6e, 0xae, 0x6a, 0xd6, 0x85, 0x87, 0x15, 0xe9, 0xe1, 0xb8, 0xce, 0x3a, 0x69, 0x96, + 0x66, 0x33, 0x55, 0x9d, 0x83, 0xda, 0x1c, 0x84, 0x3f, 0x36, 0xc0, 0x64, 0x16, 0x76, 0x71, 0x41, + 0x5c, 0x3d, 0x9b, 0x8d, 0xe7, 0x2d, 0xe8, 0x72, 0x3b, 0x16, 0xca, 0x83, 0xc3, 0xdf, 0x19, 0xac, + 0xdb, 0x4a, 0x67, 0x35, 0x6a, 0x96, 0x79, 0x04, 0xdf, 0xe8, 0x67, 0x04, 0x95, 0x71, 0x11, 0xc0, + 0x6b, 0x59, 0x27, 0xa7, 0x38, 0x27, 0xcd, 0xd2, 0x8c, 0x1e, 0x3f, 0xc5, 0x40, 0xba, 0x73, 0xf0, + 0x3d, 0x03, 0x8c, 0x93, 0xac, 0x61, 0xa6, 0xe6, 0xe3, 0xa7, 0x0d, 0x5d, 0xd7, 0xf6, 0x5b, 0x8c, + 0xd3, 0x1a, 0x8b, 0xa2, 0x36, 0x58, 0xd6, 0xfb, 0x91, 0x23, 0xec, 0x85, 0x2e, 0x31, 0xbf, 0xd8, + 0xbf, 0xde, 0x6f, 0x4d, 0x98, 0x44, 0xa9, 0x6d, 0x78, 0x0d, 0x14, 0xfd, 0xc4, 0x75, 0xf1, 0xae, + 0x4b, 0xcc, 0x27, 0x78, 0x17, 0xa1, 0xde, 0xf8, 0x6e, 0x49, 0x3a, 0x52, 0x12, 0x70, 0x0f, 0x2c, + 0x1c, 0xdd, 0x54, 0x7f, 0x80, 0xe8, 0xfa, 0x88, 0x66, 0x3e, 0xc9, 0xad, 0xcc, 0xb5, 0x9a, 0xa5, + 0xd9, 0x9d, 0xee, 0xcf, 0x6c, 0x0f, 0xb4, 0x01, 0xdf, 0x04, 0x8f, 0x69, 0x32, 0x6b, 0xde, 0x2e, + 0xb1, 0x6d, 0x62, 0xa7, 0x83, 0x96, 0xf9, 0x25, 0x0e, 0xa1, 0xce, 0xf1, 0x4e, 0x5e, 0x00, 0xdd, + 0x4f, 0x1b, 0x6e, 0x80, 0x59, 0x8d, 0xbd, 0xee, 0xc7, 0x5b, 0x51, 0x2d, 0x8e, 0x1c, 0xbf, 0x6e, + 0x2e, 0x72, 0xbb, 0x17, 0xd3, 0xd3, 0xb7, 0xa3, 0xf1, 0x50, 0x0f, 0x1d, 0xf8, 0xcd, 0x36, 0x6b, + 0xfc, 0xc7, 0x03, 0x1c, 0xde, 0x24, 0xc7, 0xd4, 0x7c, 0x8a, 0x37, 0x17, 0x7c, 0x9f, 0x77, 0x34, + 0x3a, 0xea, 0x21, 0x0f, 0xbf, 0x01, 0x2e, 0xe4, 0x38, 0x6c, 0xae, 0x30, 0x9f, 0x16, 0x03, 0x02, + 0xeb, 0x44, 0x77, 0x52, 0x22, 0xea, 0x26, 0x39, 0xc7, 0xa6, 0xce, 0x5c, 0xb1, 0x83, 0x53, 0x60, + 0xe0, 0x80, 0xc8, 0xdf, 0x38, 0x11, 0xfb, 0x08, 0xdf, 0x02, 0x43, 0x0d, 0xec, 0x26, 0xe9, 0xcc, + 0xdc, 0xbf, 0x4b, 0x11, 0x09, 0xbb, 0x2f, 0x15, 0x5e, 0x34, 0xe6, 0x3e, 0x30, 0xc0, 0x6c, 0xf7, + 0xf2, 0xfb, 0x79, 0x79, 0xf4, 0x73, 0x03, 0x4c, 0x77, 0x54, 0xda, 0x2e, 0xce, 0xb8, 0xed, 0xce, + 0xdc, 0xed, 0x63, 0xc9, 0x14, 0x19, 0xc3, 0x5b, 0x3f, 0xdd, 0xb3, 0x1f, 0x1a, 0x60, 0x2a, 0x5f, + 0xc1, 0x3e, 0xa7, 0x28, 0x95, 0xdf, 0x2f, 0x80, 0xd9, 0xee, 0xcd, 0x2a, 0xf4, 0xd4, 0x18, 0xde, + 0xf7, 0x97, 0x8c, 0x6e, 0x6f, 0x9b, 0xef, 0x1a, 0x60, 0xec, 0x1d, 0x25, 0x97, 0xfe, 0xf4, 0xd6, + 0xcf, 0xe7, 0x93, 0xf4, 0x8e, 0xc8, 0x18, 0x14, 0xe9, 0x90, 0xe5, 0xdf, 0x1b, 0x60, 0xa6, 0xeb, + 0xbd, 0xc7, 0xa6, 0x7c, 0xec, 0xba, 0xc1, 0xa1, 0x78, 0xf6, 0xd2, 0xde, 0x90, 0x97, 0x39, 0x15, + 0x49, 0xae, 0x16, 0xb3, 0xc2, 0x67, 0x10, 0xb3, 0xf2, 0x9f, 0x0c, 0x70, 0xf9, 0x7e, 0x59, 0xf7, + 0x59, 0xef, 0xe1, 0x22, 0x28, 0xca, 0xae, 0xf4, 0x98, 0xef, 0x9f, 0x1c, 0xb5, 0x64, 0x45, 0xe0, + 0x7f, 0xed, 0x10, 0x9f, 0xca, 0xbf, 0x36, 0xc0, 0x54, 0x8d, 0x44, 0x0d, 0xc7, 0x22, 0x88, 0xec, + 0x91, 0x88, 0xf8, 0x16, 0x81, 0x4b, 0x60, 0x94, 0xff, 0x34, 0x16, 0x62, 0x2b, 0x7d, 0xd0, 0x9f, + 0x96, 0x81, 0x1e, 0xbd, 0x95, 0x32, 0x50, 0x26, 0xa3, 0x1e, 0xff, 0x0b, 0x3d, 0x1f, 0xff, 0x2f, + 0x83, 0xc1, 0x30, 0x7b, 0x29, 0x2d, 0x32, 0x2e, 0x7f, 0x1c, 0xe5, 0x54, 0xce, 0x0d, 0xa2, 0x98, + 0x3f, 0x07, 0x0d, 0x49, 0x6e, 0x10, 0xc5, 0x88, 0x53, 0xcb, 0x7f, 0x36, 0xc0, 0x85, 0xf4, 0x3f, + 0x1a, 0xae, 0x43, 0xfc, 0x78, 0x25, 0xf0, 0xf7, 0x9c, 0x3a, 0xbc, 0x24, 0x5e, 0xc4, 0xb4, 0x67, + 0xa6, 
0xf4, 0x35, 0x0c, 0xde, 0x03, 0x23, 0x54, 0xac, 0x4a, 0x06, 0xfc, 0xd5, 0x47, 0x0f, 0x78, + 0x3e, 0x3c, 0xe2, 0x42, 0x4f, 0xa9, 0x29, 0x0e, 0x8b, 0xb9, 0x85, 0xab, 0x89, 0x6f, 0xcb, 0x57, + 0xd1, 0x71, 0x11, 0xf3, 0x95, 0x65, 0x41, 0x43, 0x8a, 0x5b, 0xfe, 0xa7, 0x01, 0xa6, 0x3b, 0xfe, + 0x73, 0x02, 0xbf, 0x67, 0x80, 0x71, 0x4b, 0x5b, 0x9e, 0xcc, 0xdc, 0xcd, 0xd3, 0xff, 0xaf, 0x45, + 0x33, 0x2a, 0x6e, 0x45, 0x9d, 0x82, 0xda, 0x40, 0xe1, 0x0e, 0x30, 0xad, 0xdc, 0xdf, 0xbb, 0x72, + 0x3f, 0x18, 0x5d, 0x6e, 0x35, 0x4b, 0xe6, 0x4a, 0x0f, 0x19, 0xd4, 0x53, 0xbb, 0xba, 0xf8, 0xd1, + 0xa7, 0xf3, 0xe7, 0x3e, 0xfe, 0x74, 0xfe, 0xdc, 0x27, 0x9f, 0xce, 0x9f, 0x7b, 0xb7, 0x35, 0x6f, + 0x7c, 0xd4, 0x9a, 0x37, 0x3e, 0x6e, 0xcd, 0x1b, 0x9f, 0xb4, 0xe6, 0x8d, 0xbf, 0xb7, 0xe6, 0x8d, + 0x9f, 0xfc, 0x63, 0xfe, 0xdc, 0x1b, 0x85, 0xc6, 0xf5, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xcd, + 0xae, 0x89, 0xe9, 0xf2, 0x29, 0x00, 0x00, +} + +func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionReview) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) + i-- + dAtA[i] = 0x2a + i -= 
len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.PreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StoredVersions) > 0 { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LabelSelectorPath != nil { + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OpenAPIV3Schema != nil { + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSON) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSON) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + } + i-- + if m.XIntOrString { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + i-- + if m.XEmbeddedResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + if m.XPreserveUnknownFields != nil { + i-- + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + i-- + if m.Nullable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + if m.Example != nil { + { + size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.ExternalDocs != nil { + { + size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if len(m.Definitions) > 0 { + keysForDefinitions := make([]string, 0, len(m.Definitions)) + for k := range m.Definitions { + keysForDefinitions = append(keysForDefinitions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Definitions[string(keysForDefinitions[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefinitions[iNdEx]) + copy(dAtA[i:], keysForDefinitions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + } + if m.AdditionalItems != nil { + { + size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.Dependencies) > 0 { + keysForDependencies := make([]string, 0, len(m.Dependencies)) + for k := range m.Dependencies { + keysForDependencies = append(keysForDependencies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { + v := m.Dependencies[string(keysForDependencies[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDependencies[iNdEx]) + copy(dAtA[i:], keysForDependencies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if len(m.PatternProperties) > 0 { + keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) + for k := range m.PatternProperties { + 
keysForPatternProperties = append(keysForPatternProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPatternProperties[iNdEx]) + copy(dAtA[i:], keysForPatternProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + } + if m.AdditionalProperties != nil { + { + size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if len(m.Properties) > 0 { + keysForProperties := make([]string, 0, len(m.Properties)) + for k := range m.Properties { + keysForProperties = append(keysForProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.Properties[string(keysForProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForProperties[iNdEx]) + copy(dAtA[i:], keysForProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + if m.Not != nil { + { + size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + } + if len(m.OneOf) > 0 { + for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.Items != nil { + { + size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.Required) > 0 { + for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Required[iNdEx]) + copy(dAtA[i:], m.Required[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if 
m.MinProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.MaxProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.MultipleOf != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x99 + } + i-- + if m.UniqueItems { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + if m.MinItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.MaxItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0x7a + if m.MinLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + i-- + dAtA[i] = 0x70 + } + if m.MaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + i-- + dAtA[i] = 0x68 + } + i-- + if m.ExclusiveMinimum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + if m.Minimum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i-- + dAtA[i] = 0x59 + } + i-- + if m.ExclusiveMaximum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + if m.Maximum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i-- + dAtA[i] = 0x49 + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0x3a + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x32 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + if m.Ref != nil { + i -= len(*m.Ref) + copy(dAtA[i:], *m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.JSONSchemas) > 0 { + for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Allows { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Property) > 0 { + for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Property[iNdEx]) + copy(dAtA[i:], m.Property[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WebhookConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConversionReviewVersions) > 0 { + for iNdEx := len(m.ConversionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.ClientConfig != nil { + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ConversionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DesiredAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConversionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ConvertedObjects) > 0 { + for _, e := range m.ConvertedObjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConversionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceColumnDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Priority)) + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceConversion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Plural) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Singular) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ListKind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Names.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Conversion != nil { + l = m.Conversion.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CustomResourceDefinitionStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AcceptedNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StoredVersions) > 0 { + for _, s := range m.StoredVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.Subresources != nil { + l = m.Subresources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AdditionalPrinterColumns) > 0 { + for _, e := range m.AdditionalPrinterColumns { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceSubresourceScale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpecReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StatusReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelectorPath != nil { + l = len(*m.LabelSelectorPath) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceSubresourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *CustomResourceSubresources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scale != nil { + l = m.Scale.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceValidation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OpenAPIV3Schema != nil { + l = m.OpenAPIV3Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalDocumentation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JSON) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaProps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Schema) + n += 1 + l + sovGenerated(uint64(l)) + if m.Ref != nil { + l = len(*m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Title) + n += 1 + l + sovGenerated(uint64(l)) + if m.Default != nil { + l = m.Default.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Maximum != nil { + n += 9 + } + n += 2 + if m.Minimum != nil { + n += 9 + } + n += 2 + if m.MaxLength != nil { + n += 1 + sovGenerated(uint64(*m.MaxLength)) + } + if m.MinLength != nil { + n += 1 + sovGenerated(uint64(*m.MinLength)) + } + l = len(m.Pattern) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxItems != nil { + n += 2 + sovGenerated(uint64(*m.MaxItems)) + } + if m.MinItems != nil { + n += 2 + sovGenerated(uint64(*m.MinItems)) + } + n += 3 + if m.MultipleOf != nil { + n += 10 + } + if len(m.Enum) > 0 { + for _, e := range m.Enum { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.MaxProperties != nil { + n += 2 + sovGenerated(uint64(*m.MaxProperties)) + } + if m.MinProperties != nil { + n += 2 + sovGenerated(uint64(*m.MinProperties)) + } + if len(m.Required) > 0 { + for _, s := range m.Required { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Items != nil { + l = m.Items.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllOf) > 0 { + for _, e := range m.AllOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.OneOf) > 0 { + for _, e := range m.OneOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if 
len(m.AnyOf) > 0 { + for _, e := range m.AnyOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Not != nil { + l = m.Not.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Properties) > 0 { + for k, v := range m.Properties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalProperties != nil { + l = m.AdditionalProperties.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.PatternProperties) > 0 { + for k, v := range m.PatternProperties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Dependencies) > 0 { + for k, v := range m.Dependencies { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalItems != nil { + l = m.AdditionalItems.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Definitions) > 0 { + for k, v := range m.Definitions { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ExternalDocs != nil { + l = m.ExternalDocs.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Example != nil { + l = m.Example.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + n += 3 + if m.XPreserveUnknownFields != nil { + n += 3 + } + n += 3 + n += 3 + if len(m.XListMapKeys) > 0 { + for _, s := range m.XListMapKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.XListType != nil { + l = len(*m.XListType) + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaPropsOrArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.JSONSchemas) > 0 { + for _, e := range m.JSONSchemas { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JSONSchemaPropsOrBool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Property) > 0 { + for _, s := range m.Property { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n 
+= 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookConversion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientConfig != nil { + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ConversionReviewVersions) > 0 { + for _, s := range m.ConversionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ConversionRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" + s := strings.Join([]string{`&ConversionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `}`, + }, "") + return s +} +func (this *ConversionResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForConvertedObjects := "[]RawExtension{" + for _, f := range this.ConvertedObjects { + repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConvertedObjects += "}" + s := strings.Join([]string{`&ConversionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConversionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConversionReview{`, + `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceColumnDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceColumnDefinition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Priority:` + fmt.Sprintf("%v", this.Priority) + `,`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceConversion{`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookConversion", "WebhookConversion", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinition{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", 
"CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CustomResourceDefinitionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionNames) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionNames{`, + `Plural:` + fmt.Sprintf("%v", this.Plural) + `,`, + `Singular:` + fmt.Sprintf("%v", this.Singular) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `ListKind:` + fmt.Sprintf("%v", this.ListKind) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `PreserveUnknownFields:` + fmt.Sprintf("%v", this.PreserveUnknownFields) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + 
`StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionVersion) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" + s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Served:` + fmt.Sprintf("%v", this.Served) + `,`, + `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresourceScale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceScale{`, + `SpecReplicasPath:` + fmt.Sprintf("%v", this.SpecReplicasPath) + `,`, + `StatusReplicasPath:` + fmt.Sprintf("%v", this.StatusReplicasPath) + `,`, + `LabelSelectorPath:` + valueToStringGenerated(this.LabelSelectorPath) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresourceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceStatus{`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresources{`, + `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, + `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceValidation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceValidation{`, + `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalDocumentation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalDocumentation{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *JSON) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSON{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaProps) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnum := "[]JSON{" + for _, f := range this.Enum { + repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," + } + repeatedStringForEnum += "}" + repeatedStringForAllOf := "[]JSONSchemaProps{" + for _, f := range this.AllOf { + repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAllOf += "}" + 
repeatedStringForOneOf := "[]JSONSchemaProps{" + for _, f := range this.OneOf { + repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForOneOf += "}" + repeatedStringForAnyOf := "[]JSONSchemaProps{" + for _, f := range this.AnyOf { + repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAnyOf += "}" + keysForProperties := make([]string, 0, len(this.Properties)) + for k := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + mapStringForProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + keysForPatternProperties := make([]string, 0, len(this.PatternProperties)) + for k := range this.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + mapStringForPatternProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForPatternProperties { + mapStringForPatternProperties += fmt.Sprintf("%v: %v,", k, this.PatternProperties[k]) + } + mapStringForPatternProperties += "}" + keysForDependencies := make([]string, 0, len(this.Dependencies)) + for k := range this.Dependencies { + keysForDependencies = append(keysForDependencies, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + mapStringForDependencies := "JSONSchemaDependencies{" + for _, k := range keysForDependencies { + mapStringForDependencies += fmt.Sprintf("%v: %v,", k, this.Dependencies[k]) + } + mapStringForDependencies += "}" + keysForDefinitions := make([]string, 0, len(this.Definitions)) + for k := range this.Definitions { + keysForDefinitions = append(keysForDefinitions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + mapStringForDefinitions := "JSONSchemaDefinitions{" + for _, k := range keysForDefinitions { + mapStringForDefinitions += fmt.Sprintf("%v: %v,", k, this.Definitions[k]) + } + mapStringForDefinitions += "}" + s := strings.Join([]string{`&JSONSchemaProps{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`, + `Ref:` + valueToStringGenerated(this.Ref) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Title:` + fmt.Sprintf("%v", this.Title) + `,`, + `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, + `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, + `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, + `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, + `ExclusiveMinimum:` + fmt.Sprintf("%v", this.ExclusiveMinimum) + `,`, + `MaxLength:` + valueToStringGenerated(this.MaxLength) + `,`, + `MinLength:` + valueToStringGenerated(this.MinLength) + `,`, + `Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`, + `MaxItems:` + valueToStringGenerated(this.MaxItems) + `,`, + `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, + `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, + `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, + `Enum:` + repeatedStringForEnum + `,`, + `MaxProperties:` + 
valueToStringGenerated(this.MaxProperties) + `,`, + `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, + `Required:` + fmt.Sprintf("%v", this.Required) + `,`, + `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, + `AllOf:` + repeatedStringForAllOf + `,`, + `OneOf:` + repeatedStringForOneOf + `,`, + `AnyOf:` + repeatedStringForAnyOf + `,`, + `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Properties:` + mapStringForProperties + `,`, + `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `PatternProperties:` + mapStringForPatternProperties + `,`, + `Dependencies:` + mapStringForDependencies + `,`, + `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `Definitions:` + mapStringForDefinitions + `,`, + `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, + `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, + `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, + `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, + `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, + `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, + `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, + `XListType:` + valueToStringGenerated(this.XListType) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrArray) String() string { + if this == nil { + return "nil" + } + repeatedStringForJSONSchemas := "[]JSONSchemaProps{" + for _, f := range this.JSONSchemas { + repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForJSONSchemas += "}" + s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrBool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, + `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrStringArray) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Property:` + fmt.Sprintf("%v", this.Property) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + 
`CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookConversion{`, + `ClientConfig:` + strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ConversionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredAPIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredAPIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, runtime.RawExtension{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConvertedObjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) + if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &ConversionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &ConversionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = ConversionStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Webhook == nil { + m.Webhook = &WebhookConversion{} + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CustomResourceDefinitionConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomResourceDefinition{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Plural", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plural = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Singular = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conversion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Conversion == nil { + m.Conversion = &CustomResourceConversion{} + } + if err := m.Conversion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PreserveUnknownFields = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Served = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Storage = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &CustomResourceValidation{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StatusReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelectorPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.LabelSelectorPath = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &CustomResourceSubresourceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scale == nil { + m.Scale = &CustomResourceSubresourceScale{} + } + if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OpenAPIV3Schema == nil { + m.OpenAPIV3Schema = &JSONSchemaProps{} + } + if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSON) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSON: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = JSONSchemaURL(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Ref = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Default == nil { + m.Default = &JSON{} + } + if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Maximum = &v2 + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ExclusiveMaximum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMaximum = bool(v != 0) + case 11: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Minimum = &v2 + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMinimum = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxLength = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinLength = &v + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxItems = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinItems = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UniqueItems = bool(v != 0) + case 19: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.MultipleOf = &v2 + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enum = append(m.Enum, JSON{}) + if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxProperties = &v + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinProperties = &v + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Required = append(m.Required, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Items == nil { + m.Items = &JSONSchemaPropsOrArray{} + } + if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllOf = append(m.AllOf, JSONSchemaProps{}) + if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OneOf = append(m.OneOf, JSONSchemaProps{}) + if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnyOf = append(m.AnyOf, JSONSchemaProps{}) + if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Not == nil { + m.Not = &JSONSchemaProps{} + } + if err := m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Properties == nil { + m.Properties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Properties[mapkey] = *mapvalue + iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalProperties == nil { + m.AdditionalProperties = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PatternProperties == nil { + m.PatternProperties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PatternProperties[mapkey] = *mapvalue + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Dependencies == nil { + m.Dependencies = make(JSONSchemaDependencies) + } + var mapkey string + mapvalue := &JSONSchemaPropsOrStringArray{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaPropsOrStringArray{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Dependencies[mapkey] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalItems == nil { + m.AdditionalItems = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definitions == nil { + m.Definitions = make(JSONSchemaDefinitions) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if 
postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Definitions[mapkey] = *mapvalue + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExternalDocs == nil { + m.ExternalDocs = &ExternalDocumentation{} + } + if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Example == nil { + m.Example = &JSON{} + } + if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Nullable = bool(v != 0) + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.XPreserveUnknownFields = &b + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) + } 
+ var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XEmbeddedResource = bool(v != 0) + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XIntOrString = bool(v != 0) + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XListType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) + if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allows = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx 
+ skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookConversion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientConfig == nil { + m.ClientConfig = &WebhookClientConfig{} + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConversionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConversionReviewVersions = append(m.ConversionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto new file mode 100644 index 00000000..2fbed9c1 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -0,0 +1,593 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ConversionRequest describes the conversion request parameters. +message ConversionRequest { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" + optional string desiredAPIVersion = 2; + + // objects is the list of custom resource objects to be converted. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; +} + +// ConversionResponse describes a conversion response. +message ConversionResponse { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + optional string uid = 1; + + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. 
The `result.message` + // will be used to construct an error message for the end user. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; +} + +// ConversionReview describes a conversion request/response. +message ConversionReview { + // request describes the attributes for the conversion request. + // +optional + optional ConversionRequest request = 1; + + // response describes the attributes for the conversion response. + // +optional + optional ConversionResponse response = 2; +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +message CustomResourceColumnDefinition { + // name is a human readable name for the column. + optional string name = 1; + + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + optional string type = 2; + + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + optional string format = 3; + + // description is a human readable description of this column. + // +optional + optional string description = 4; + + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + optional int32 priority = 5; + + // jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. + optional string jsonPath = 6; +} + +// CustomResourceConversion describes how to convert different versions of a CR. +message CustomResourceConversion { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. + optional string strategy = 1; + + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. + // +optional + optional WebhookConversion webhook = 2; +} + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +message CustomResourceDefinition { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes how the user wants the resources to appear + optional CustomResourceDefinitionSpec spec = 2; + + // status indicates the actual state of the CustomResourceDefinition + // +optional + optional CustomResourceDefinitionStatus status = 3; +} + +// CustomResourceDefinitionCondition contains details for the current condition of this pod. +message CustomResourceDefinitionCondition { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + optional string type = 1; + + // status is the status of the condition. + // Can be True, False, Unknown. 
+ optional string status = 2; + + // lastTransitionTime last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable message indicating details about last transition. + // +optional + optional string message = 5; +} + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +message CustomResourceDefinitionList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items list individual CustomResourceDefinition objects + repeated CustomResourceDefinition items = 2; +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +message CustomResourceDefinitionNames { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis///.../`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + // Must be all lowercase. + optional string plural = 1; + + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + optional string singular = 2; + + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get `. + // It must be all lowercase. + // +optional + repeated string shortNames = 3; + + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + optional string kind = 4; + + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + optional string listKind = 5; + + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + repeated string categories = 6; +} + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +message CustomResourceDefinitionSpec { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + optional string group = 1; + + // names specify the resource and kind names for the custom resource. + optional CustomResourceDefinitionNames names = 3; + + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + optional string scope = 4; + + // versions is the list of all API versions of the defined custom resource. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. 
An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + repeated CustomResourceDefinitionVersion versions = 7; + + // conversion defines conversion settings for the CRD. + // +optional + optional CustomResourceConversion conversion = 9; + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional + optional bool preserveUnknownFields = 10; +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +message CustomResourceDefinitionStatus { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + repeated CustomResourceDefinitionCondition conditions = 1; + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + optional CustomResourceDefinitionNames acceptedNames = 2; + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + repeated string storedVersions = 3; +} + +// CustomResourceDefinitionVersion describes a version for CRD. +message CustomResourceDefinitionVersion { + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. + optional string name = 1; + + // served is a flag enabling/disabling this version from being served via REST APIs + optional bool served = 2; + + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + optional bool storage = 3; + + // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. + // +optional + optional CustomResourceValidation schema = 4; + + // subresources specify what subresources this version of the defined custom resource have. + // +optional + optional CustomResourceSubresources subresources = 5; + + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If no columns are specified, a single column displaying the age of the custom resource is used. + // +optional + repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; +} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +message CustomResourceSubresourceScale { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. 
+ // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + optional string specReplicasPath = 1; + + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + optional string statusReplicasPath = 2; + + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + optional string labelSelectorPath = 3; +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +message CustomResourceSubresourceStatus { +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +message CustomResourceSubresources { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + optional CustomResourceSubresourceStatus status = 1; + + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + optional CustomResourceSubresourceScale scale = 2; +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +message CustomResourceValidation { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + optional JSONSchemaProps openAPIV3Schema = 1; +} + +// ExternalDocumentation allows referencing an external resource for extended documentation. +message ExternalDocumentation { + optional string description = 1; + + optional string url = 2; +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +message JSON { + optional bytes raw = 1; +} + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). 
+message JSONSchemaProps { + optional string id = 1; + + optional string schema = 2; + + optional string ref = 3; + + optional string description = 4; + + optional string type = 5; + + optional string format = 6; + + optional string title = 7; + + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // Defaulting requires spec.preserveUnknownFields to be false. + optional JSON default = 8; + + optional double maximum = 9; + + optional bool exclusiveMaximum = 10; + + optional double minimum = 11; + + optional bool exclusiveMinimum = 12; + + optional int64 maxLength = 13; + + optional int64 minLength = 14; + + optional string pattern = 15; + + optional int64 maxItems = 16; + + optional int64 minItems = 17; + + optional bool uniqueItems = 18; + + optional double multipleOf = 19; + + repeated JSON enum = 20; + + optional int64 maxProperties = 21; + + optional int64 minProperties = 22; + + repeated string required = 23; + + optional JSONSchemaPropsOrArray items = 24; + + repeated JSONSchemaProps allOf = 25; + + repeated JSONSchemaProps oneOf = 26; + + repeated JSONSchemaProps anyOf = 27; + + optional JSONSchemaProps not = 28; + + map properties = 29; + + optional JSONSchemaPropsOrBool additionalProperties = 30; + + map patternProperties = 31; + + map dependencies = 32; + + optional JSONSchemaPropsOrBool additionalItems = 33; + + map definitions = 34; + + optional ExternalDocumentation externalDocs = 35; + + optional JSON example = 36; + + optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. 
+ // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; +} + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +message JSONSchemaPropsOrArray { + optional JSONSchemaProps schema = 1; + + repeated JSONSchemaProps jSONSchemas = 2; +} + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +message JSONSchemaPropsOrBool { + optional bool allows = 1; + + optional JSONSchemaProps schema = 2; +} + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +message JSONSchemaPropsOrStringArray { + optional JSONSchemaProps schema = 1; + + repeated string property = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // namespace is the namespace of the service. + // Required + optional string namespace = 1; + + // name is the name of the service. + // Required + optional string name = 2; + + // path is an optional URL path at which the webhook will be contacted. + // +optional + optional string path = 3; + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + optional int32 port = 4; +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. +message WebhookClientConfig { + // url gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. 
+ // + // +optional + optional string url = 3; + + // service is a reference to the service for this webhook. Either + // service or url must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + +// WebhookConversion describes how to call a conversion webhook +message WebhookConversion { + // clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // +optional + optional WebhookClientConfig clientConfig = 2; + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + repeated string conversionReviewVersions = 3; +} + diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go new file mode 100644 index 00000000..ba7f286e --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "errors" + + "k8s.io/apimachinery/pkg/util/json" +) + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrBool + switch { + case len(data) == 0: + case data[0] == '{': + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Allows = true + nw.Schema = &sch + case len(data) == 4 && string(data) == "true": + nw.Allows = true + case len(data) == 5 && string(data) == "false": + nw.Allows = false + default: + return errors.New("boolean or JSON schema expected") + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw JSONSchemaPropsOrStringArray + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return json.Marshal(s.JSONSchemas) + } + return json.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.JSONSchemas); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSON) MarshalJSON() ([]byte, error) { + if len(s.Raw) > 0 { + return s.Raw, nil + } + return []byte("null"), nil + +} + +func (s *JSON) UnmarshalJSON(data []byte) error { + if len(data) > 0 && string(data) != "null" { + s.Raw = data + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go new file mode 100644 index 00000000..a1b2b60a --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiextensions.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CustomResourceDefinition{}, + &CustomResourceDefinitionList{}, + &ConversionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go new file mode 100644 index 00000000..000f3fa1 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -0,0 +1,463 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ConversionStrategyType describes different conversion types. +type ConversionStrategyType string + +const ( + // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. + // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. + // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. + KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" + + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. + NoneConverter ConversionStrategyType = "None" + // WebhookConverter is a converter that calls to an external webhook to convert the CR. 
+ WebhookConverter ConversionStrategyType = "Webhook" +) + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +type CustomResourceDefinitionSpec struct { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + // names specify the resource and kind names for the custom resource. + Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` + // versions is the list of all API versions of the defined custom resource. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + Versions []CustomResourceDefinitionVersion `json:"versions" protobuf:"bytes,7,rep,name=versions"` + + // conversion defines conversion settings for the CRD. + // +optional + Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional + PreserveUnknownFields bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` +} + +// CustomResourceConversion describes how to convert different versions of a CR. +type CustomResourceConversion struct { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. + Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` + + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. 
+ // +optional + Webhook *WebhookConversion `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"` +} + +// WebhookConversion describes how to call a conversion webhook +type WebhookConversion struct { + // clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // +optional + ClientConfig *WebhookClientConfig `json:"clientConfig,omitempty" protobuf:"bytes,2,name=clientConfig"` + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + ConversionReviewVersions []string `json:"conversionReviewVersions" protobuf:"bytes,3,rep,name=conversionReviewVersions"` +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. +type WebhookClientConfig struct { + // url gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // service is a reference to the service for this webhook. Either + // service or url must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // namespace is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // name is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // path is an optional URL path at which the webhook will be contacted. 
+ // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} + +// CustomResourceDefinitionVersion describes a version for CRD. +type CustomResourceDefinitionVersion struct { + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // served is a flag enabling/disabling this version from being served via REST APIs + Served bool `json:"served" protobuf:"varint,2,opt,name=served"` + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` + // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. + // +optional + Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` + // subresources specify what subresources this version of the defined custom resource have. + // +optional + Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If no columns are specified, a single column displaying the age of the custom resource is used. + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +type CustomResourceColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` + // description is a human readable description of this column. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` + // jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. 
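
For readers skimming this vendored API, a brief usage sketch of the `CustomResourceDefinitionVersion` type defined above: one served storage version with a structural `openAPIV3Schema` and one extra printer column. The concrete values (`v1`, `.spec.replicas`, the `Replicas` column) are the editor's illustrative assumptions, not anything defined in this repository.

import apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

// sampleVersion shows how a single CRD version is typically populated.
func sampleVersion() apiextensionsv1.CustomResourceDefinitionVersion {
	return apiextensionsv1.CustomResourceDefinitionVersion{
		Name:    "v1",
		Served:  true,
		Storage: true, // exactly one entry in spec.versions may set storage=true
		Schema: &apiextensionsv1.CustomResourceValidation{
			OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
				Type: "object",
				Properties: map[string]apiextensionsv1.JSONSchemaProps{
					"spec": {
						Type: "object",
						Properties: map[string]apiextensionsv1.JSONSchemaProps{
							"replicas": {Type: "integer"},
						},
					},
				},
			},
		},
		AdditionalPrinterColumns: []apiextensionsv1.CustomResourceColumnDefinition{
			{Name: "Replicas", Type: "integer", JSONPath: ".spec.replicas"},
		},
	}
}
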
+ JSONPath string `json:"jsonPath" protobuf:"bytes,6,opt,name=jsonPath"` +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +type CustomResourceDefinitionNames struct { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis///.../`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + // Must be all lowercase. + Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get `. + // It must be all lowercase. + // +optional + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` +} + +// ResourceScope is an enum defining the different scopes available to a custom resource +type ResourceScope string + +const ( + ClusterScoped ResourceScope = "Cluster" + NamespaceScoped ResourceScope = "Namespaced" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type +type CustomResourceDefinitionConditionType string + +const ( + // Established means that the resource has become active. A resource is established when all names are + // accepted without a conflict for the first time. A resource stays established until deleted, even during + // a later NamesAccepted due to changed names. Note that not all names can be changed. + Established CustomResourceDefinitionConditionType = "Established" + // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in + // the group and are therefore accepted. + NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. 
+ // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" + // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. + Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" +) + +// CustomResourceDefinitionCondition contains details for the current condition of this pod. +type CustomResourceDefinitionCondition struct { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` + // status is the status of the condition. + // Can be True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // lastTransitionTime last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +type CustomResourceDefinitionStatus struct { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. 
The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` +} + +// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of +// a CustomResourceDefinition +const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +type CustomResourceDefinition struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes how the user wants the resources to appear + Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status indicates the actual state of the CustomResourceDefinition + // +optional + Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +type CustomResourceDefinitionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items list individual CustomResourceDefinition objects + Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +type CustomResourceValidation struct { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +type CustomResourceSubresources struct { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. 
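
As a small illustration of how the condition types above are typically consumed, a sketch of checking that a CRD has become `Established`; the helper name `isEstablished` is the editor's, not part of this codebase.

import apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

// isEstablished reports whether a CustomResourceDefinition has the Established
// condition set to True, i.e. its names were accepted and it is being served.
func isEstablished(crd *apiextensionsv1.CustomResourceDefinition) bool {
	for _, cond := range crd.Status.Conditions {
		if cond.Type == apiextensionsv1.Established && cond.Status == apiextensionsv1.ConditionTrue {
			return true
		}
	}
	return false
}
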
When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +type CustomResourceSubresourceStatus struct{} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +type CustomResourceSubresourceScale struct { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConversionReview describes a conversion request/response. +type ConversionReview struct { + metav1.TypeMeta `json:",inline"` + // request describes the attributes for the conversion request. + // +optional + Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // response describes the attributes for the conversion response. + // +optional + Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// ConversionRequest describes the conversion request parameters. +type ConversionRequest struct { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // desiredAPIVersion is the version to convert given objects to. e.g. 
"myapi.example.com/v1" + DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` + // objects is the list of custom resource objects to be converted. + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` +} + +// ConversionResponse describes a conversion response. +type ConversionResponse struct { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` + // will be used to construct an error message for the end user. + Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go new file mode 100644 index 00000000..cd6021ba --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -0,0 +1,214 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +type JSONSchemaProps struct { + ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` + Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` + Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` + Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` + // default is a default value for undefined object fields. 
+ // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // Defaulting requires spec.preserveUnknownFields to be false. + Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` + Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` + Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` + MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` + MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` + Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` + MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` + MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` + UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` + MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` + Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` + MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` + MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` + Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` + Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` + AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` + OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + AnyOf []JSONSchemaProps `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"` + Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` + Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` + AdditionalProperties *JSONSchemaPropsOrBool `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"` + PatternProperties map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"` + Dependencies JSONSchemaDependencies `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"` + AdditionalItems *JSONSchemaPropsOrBool `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"` + Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` + Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` + Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. 
+ XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +type JSON struct { + Raw []byte `protobuf:"bytes,1,opt,name=raw"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSON) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSON) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaURL represents a schema url. 
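
A brief sketch of how `additionalProperties: false` is expressed with the `JSONSchemaPropsOrBool` wrapper defined later in this file; the printed output follows the custom MarshalJSON rules in this package's marshal.go (schema if set, otherwise `true`/`false`). The function name is illustrative only.

import (
	"encoding/json"
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// demoAdditionalProperties forbids unknown object fields in a schema fragment.
func demoAdditionalProperties() {
	schema := apiextensionsv1.JSONSchemaProps{
		Type:                 "object",
		AdditionalProperties: &apiextensionsv1.JSONSchemaPropsOrBool{Allows: false},
	}
	out, _ := json.Marshal(schema.AdditionalProperties)
	fmt.Println(string(out)) // prints: false
}
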
+type JSONSchemaURL string + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +type JSONSchemaPropsOrArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +type JSONSchemaPropsOrBool struct { + Allows bool `protobuf:"varint,1,opt,name=allows"` + Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDependencies represent a dependencies property. +type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +type JSONSchemaPropsOrStringArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + Property []string `protobuf:"bytes,2,rep,name=property"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDefinitions contains the models explicitly defined in this spec. +type JSONSchemaDefinitions map[string]JSONSchemaProps + +// ExternalDocumentation allows referencing an external resource for extended documentation. +type ExternalDocumentation struct { + Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"` + URL string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go new file mode 100644 index 00000000..de5c8089 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -0,0 +1,1307 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*CustomResourceColumnDefinition)(nil), (*apiextensions.CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(a.(*CustomResourceColumnDefinition), b.(*apiextensions.CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceColumnDefinition)(nil), (*CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(a.(*apiextensions.CustomResourceColumnDefinition), b.(*CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinition)(nil), (*CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(a.(*apiextensions.CustomResourceDefinition), b.(*CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*CustomResourceDefinitionCondition)(nil), (*apiextensions.CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(a.(*CustomResourceDefinitionCondition), b.(*apiextensions.CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionCondition)(nil), (*CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(a.(*apiextensions.CustomResourceDefinitionCondition), b.(*CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionList)(nil), (*apiextensions.CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(a.(*CustomResourceDefinitionList), b.(*apiextensions.CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionList)(nil), (*CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(a.(*apiextensions.CustomResourceDefinitionList), b.(*CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionNames)(nil), (*apiextensions.CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(a.(*CustomResourceDefinitionNames), b.(*apiextensions.CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionNames)(nil), (*CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(a.(*apiextensions.CustomResourceDefinitionNames), b.(*CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionStatus)(nil), (*apiextensions.CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(a.(*CustomResourceDefinitionStatus), b.(*apiextensions.CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionStatus)(nil), (*CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(a.(*apiextensions.CustomResourceDefinitionStatus), b.(*CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionVersion)(nil), (*apiextensions.CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(a.(*CustomResourceDefinitionVersion), b.(*apiextensions.CustomResourceDefinitionVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionVersion)(nil), (*CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(a.(*apiextensions.CustomResourceDefinitionVersion), b.(*CustomResourceDefinitionVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceScale)(nil), (*apiextensions.CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(a.(*CustomResourceSubresourceScale), b.(*apiextensions.CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceScale)(nil), (*CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(a.(*apiextensions.CustomResourceSubresourceScale), b.(*CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceStatus)(nil), (*apiextensions.CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(a.(*CustomResourceSubresourceStatus), b.(*apiextensions.CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceStatus)(nil), (*CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(a.(*apiextensions.CustomResourceSubresourceStatus), b.(*CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresources)(nil), (*apiextensions.CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(a.(*CustomResourceSubresources), b.(*apiextensions.CustomResourceSubresources), scope) + }); err 
!= nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresources)(nil), (*CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(a.(*apiextensions.CustomResourceSubresources), b.(*CustomResourceSubresources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceValidation)(nil), (*apiextensions.CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(a.(*CustomResourceValidation), b.(*apiextensions.CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceValidation)(nil), (*CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(a.(*apiextensions.CustomResourceValidation), b.(*CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExternalDocumentation)(nil), (*apiextensions.ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(a.(*ExternalDocumentation), b.(*apiextensions.ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ExternalDocumentation)(nil), (*ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(a.(*apiextensions.ExternalDocumentation), b.(*ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSON_To_v1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaProps)(nil), (*apiextensions.JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(a.(*JSONSchemaProps), b.(*apiextensions.JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrArray)(nil), (*apiextensions.JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(a.(*JSONSchemaPropsOrArray), b.(*apiextensions.JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrArray)(nil), (*JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(a.(*apiextensions.JSONSchemaPropsOrArray), b.(*JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrBool)(nil), (*apiextensions.JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(a.(*JSONSchemaPropsOrBool), b.(*apiextensions.JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrBool)(nil), (*JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(a.(*apiextensions.JSONSchemaPropsOrBool), b.(*JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrStringArray)(nil), (*apiextensions.JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(a.(*JSONSchemaPropsOrStringArray), b.(*apiextensions.JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrStringArray)(nil), (*JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(a.(*apiextensions.JSONSchemaPropsOrStringArray), b.(*JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ServiceReference_To_v1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSON_To_v1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function. 
+func Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) + // WARNING: in.Webhook requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + out.Strategy = ConversionStrategyType(in.Strategy) + // WARNING: in.WebhookClientConfig requires manual conversion: does not exist in peer-type + // WARNING: in.ConversionReviewVersions requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition is an autogenerated conversion function. 
+func Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = apiextensions.CustomResourceDefinitionConditionType(in.Type) + out.Status = apiextensions.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = CustomResourceDefinitionConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apiextensions.CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames is an autogenerated conversion function. 
+func Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + if err := Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = apiextensions.ResourceScope(in.Scope) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(apiextensions.CustomResourceConversion) + if err := Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + if err := metav1.Convert_bool_To_Pointer_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { + return err + } + return nil +} + +func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + // WARNING: in.Version requires manual conversion: does not exist in peer-type + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = ResourceScope(in.Scope) + // WARNING: in.Validation requires manual conversion: does not exist in peer-type + // WARNING: in.Subresources requires manual conversion: does not exist in peer-type + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + // WARNING: 
in.AdditionalPrinterColumns requires manual conversion: does not exist in peer-type + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + if err := Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + if err := metav1.Convert_Pointer_bool_To_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]apiextensions.CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.CustomResourceValidation) + if err := Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale is an autogenerated conversion function. 
+func Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*apiextensions.CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*apiextensions.CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources is an autogenerated conversion function. 
+func Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in, out, s) +} + +func autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation is an autogenerated conversion function. +func Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + return autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in, out, s) +} + +func autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation is an autogenerated conversion function. 
+func Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + return autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in, out, s) +} + +func autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation is an autogenerated conversion function. +func Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + return autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in, out, s) +} + +func autoConvert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + // WARNING: in.Raw requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + // FIXME: Type apiextensions.JSON is unsupported. + return nil +} + +func autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = apiextensions.JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(apiextensions.JSON) + if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]apiextensions.JSON, len(*in)) + for i := range *in { + if err := Convert_v1_JSON_To_apiextensions_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(apiextensions.JSONSchemaPropsOrArray) + if err := Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + 
*out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(apiextensions.JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaPropsOrStringArray) + if err := Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(apiextensions.JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*apiextensions.ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(apiextensions.JSON) + if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.Nullable = in.Nullable + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + 
out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + return nil +} + +// Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps is an autogenerated conversion function. +func Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Nullable = in.Nullable + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]JSON, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSON_To_v1_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(JSONSchemaPropsOrArray) + if err := Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + 
in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaPropsOrStringArray) + if err := Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + return nil +} + +func autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// 
Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool is an autogenerated conversion function. 
+func Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := metav1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function. +func Convert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + return autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in, out, s) +} + +func autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := metav1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_ServiceReference_To_v1_ServiceReference is an autogenerated conversion function. 
+func Convert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + return autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in, out, s) +} + +func autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiextensions.ServiceReference) + if err := Convert_v1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function. +func Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s) +} + +func autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiextensions_ServiceReference_To_v1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig is an autogenerated conversion function. +func Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + return autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in, out, s) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..5fa85458 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -0,0 +1,663 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. +func (in *ConversionRequest) DeepCopy() *ConversionRequest { + if in == nil { + return nil + } + out := new(ConversionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { + *out = *in + if in.ConvertedObjects != nil { + in, out := &in.ConvertedObjects, &out.ConvertedObjects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Result.DeepCopyInto(&out.Result) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. +func (in *ConversionResponse) DeepCopy() *ConversionResponse { + if in == nil { + return nil + } + out := new(ConversionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(ConversionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(ConversionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. +func (in *ConversionReview) DeepCopy() *ConversionReview { + if in == nil { + return nil + } + out := new(ConversionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConversionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. +func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { + if in == nil { + return nil + } + out := new(CustomResourceColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConversion) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. 
+func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { + if in == nil { + return nil + } + out := new(CustomResourceConversion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. +func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { + if in == nil { + return nil + } + out := new(CustomResourceDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition. +func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. +func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) { + *out = *in + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames. 
+func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) { + *out = *in + in.Names.DeepCopyInto(&out.Names) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec. +func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CustomResourceDefinitionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) + if in.StoredVersions != nil { + in, out := &in.StoredVersions, &out.StoredVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus. +func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + (*in).DeepCopyInto(*out) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = new(CustomResourceSubresources) + (*in).DeepCopyInto(*out) + } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. +func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { + *out = *in + if in.LabelSelectorPath != nil { + in, out := &in.LabelSelectorPath, &out.LabelSelectorPath + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale. +func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceScale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus. +func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CustomResourceSubresourceStatus) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(CustomResourceSubresourceScale) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources. +func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources { + if in == nil { + return nil + } + out := new(CustomResourceSubresources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) { + *out = *in + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation. +func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation { + if in == nil { + return nil + } + out := new(CustomResourceValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation. +func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation { + if in == nil { + return nil + } + out := new(ExternalDocumentation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSON) DeepCopyInto(out *JSON) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON. 
+func (in *JSON) DeepCopy() *JSON { + if in == nil { + return nil + } + out := new(JSON) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) { + { + in := &in + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions. +func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions { + if in == nil { + return nil + } + out := new(JSONSchemaDefinitions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) { + { + in := &in + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies. +func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies { + if in == nil { + return nil + } + out := new(JSONSchemaDependencies) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray. +func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool. +func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrBool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray. +func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrStringArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConversion) DeepCopyInto(out *WebhookConversion) { + *out = *in + if in.ClientConfig != nil { + in, out := &in.ClientConfig, &out.ClientConfig + *out = new(WebhookClientConfig) + (*in).DeepCopyInto(*out) + } + if in.ConversionReviewVersions != nil { + in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConversion. +func (in *WebhookConversion) DeepCopy() *WebhookConversion { + if in == nil { + return nil + } + out := new(WebhookConversion) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go new file mode 100644 index 00000000..ed03cdd8 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go @@ -0,0 +1,57 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetObjectDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) + scheme.AddTypeDefaultingFunc(&CustomResourceDefinitionList{}, func(obj interface{}) { + SetObjectDefaults_CustomResourceDefinitionList(obj.(*CustomResourceDefinitionList)) + }) + return nil +} + +func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinition(in) + SetDefaults_CustomResourceDefinitionSpec(&in.Spec) + if in.Spec.Conversion != nil { + if in.Spec.Conversion.Webhook != nil { + if in.Spec.Conversion.Webhook.ClientConfig != nil { + if in.Spec.Conversion.Webhook.ClientConfig.Service != nil { + SetDefaults_ServiceReference(in.Spec.Conversion.Webhook.ClientConfig.Service) + } + } + } + } +} + +func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CustomResourceDefinition(a) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go index f6a114e2..a4560dc5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go @@ -234,5 +234,31 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return out } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go index 7bea2d69..1a9c2238 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -20,13 +20,11 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) - // TODO figure out why I can't seem to get my defaulter generated - // 
return RegisterDefaults(scheme) - return nil + return RegisterDefaults(scheme) } func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { @@ -71,14 +69,14 @@ func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) if obj.Conversion.Strategy == WebhookConverter && len(obj.Conversion.ConversionReviewVersions) == 0 { obj.Conversion.ConversionReviewVersions = []string{SchemeGroupVersion.Version} } + if obj.PreserveUnknownFields == nil { + obj.PreserveUnknownFields = utilpointer.BoolPtr(true) + } } -// hasPerVersionColumns returns true if a CRD uses per-version columns. -func hasPerVersionColumns(versions []CustomResourceDefinitionVersion) bool { - for _, v := range versions { - if len(v.AdditionalPrinterColumns) > 0 { - return true - } +// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference +func SetDefaults_ServiceReference(obj *ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) } - return false } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index ea46688e..c28384c2 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -17,55 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto - - It has these top-level messages: - ConversionRequest - ConversionResponse - ConversionReview - CustomResourceColumnDefinition - CustomResourceConversion - CustomResourceDefinition - CustomResourceDefinitionCondition - CustomResourceDefinitionList - CustomResourceDefinitionNames - CustomResourceDefinitionSpec - CustomResourceDefinitionStatus - CustomResourceDefinitionVersion - CustomResourceSubresourceScale - CustomResourceSubresourceStatus - CustomResourceSubresources - CustomResourceValidation - ExternalDocumentation - JSON - JSONSchemaProps - JSONSchemaPropsOrArray - JSONSchemaPropsOrBool - JSONSchemaPropsOrStringArray - ServiceReference - WebhookClientConfig -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + encoding_binary "encoding/binary" + fmt "fmt" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import encoding_binary "encoding/binary" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -78,129 +48,677 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } -func (*ConversionRequest) ProtoMessage() {} -func (*ConversionRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo -func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } -func (*ConversionResponse) ProtoMessage() {} -func (*ConversionResponse) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} -func (m *ConversionReview) Reset() { *m = ConversionReview{} } -func (*ConversionReview) ProtoMessage() {} -func (*ConversionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ConversionReview proto.InternalMessageInfo func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } func (*CustomResourceColumnDefinition) ProtoMessage() {} func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { 
- return fileDescriptorGenerated, []int{3} + return fileDescriptor_98a4cc6918394e53, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo + func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } func (*CustomResourceConversion) ProtoMessage() {} func (*CustomResourceConversion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_98a4cc6918394e53, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } func (*CustomResourceDefinition) ProtoMessage() {} func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_98a4cc6918394e53, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } func (*CustomResourceDefinitionCondition) ProtoMessage() {} func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_98a4cc6918394e53, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() } +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } func (*CustomResourceDefinitionList) ProtoMessage() {} func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_98a4cc6918394e53, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } func (*CustomResourceDefinitionNames) ProtoMessage() {} func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptor_98a4cc6918394e53, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } func (*CustomResourceDefinitionSpec) ProtoMessage() {} func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_98a4cc6918394e53, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionSpec proto.InternalMessageInfo + 
func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } func (*CustomResourceDefinitionStatus) ProtoMessage() {} func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_98a4cc6918394e53, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() } +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } func (*CustomResourceDefinitionVersion) ProtoMessage() {} func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_98a4cc6918394e53, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo + func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_98a4cc6918394e53, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_98a4cc6918394e53, []int{13} +} +func (m 
*CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo + func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_98a4cc6918394e53, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() } +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_98a4cc6918394e53, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) 
XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m = JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) } -func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } -func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo -func (m *JSON) Reset() { *m = JSON{} } -func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} -func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } -func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo -func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } -func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (m *JSONSchemaPropsOrBool) Reset() { *m = 
JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} -func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } -func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_98a4cc6918394e53, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) } -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{23} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionRequest") @@ -222,16 +740,214 @@ func init() { proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation") proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSON") proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PropertiesEntry") proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_98a4cc6918394e53) +} + +var fileDescriptor_98a4cc6918394e53 = []byte{ + // 2955 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0x23, 0x47, + 0xf5, 0xdf, 0x91, 0x2c, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xcb, 0xab, + 0x7c, 0xb3, 0x5f, 0x27, 0xec, 0xca, 0xc9, 0x92, 0x90, 0x90, 0x2a, 0x8a, 0xb2, 0x6c, 0x27, 0x38, + 0x59, 0x5b, 0xa6, 0xb5, 0x9b, 0x18, 0xf2, 0xb3, 0xad, 0x69, 0xc9, 0xb3, 0x9e, 0x5f, 0x3b, 0x3d, + 0x23, 0xdb, 0x15, 0xa0, 0xf8, 0x51, 0x29, 0x28, 0x0a, 0x08, 0x45, 0x72, 0xa1, 0x0a, 0x0e, 0x81, + 0xe2, 0xc2, 0x01, 0x0e, 0x70, 0x83, 0x3f, 0x20, 0xc7, 0x14, 0xa7, 0x1c, 0x28, 0x15, 0xab, 0x5c, + 0x39, 0x52, 0x05, 0xe5, 0x13, 0xd5, 
0x3f, 0xa6, 0x67, 0x34, 0x92, 0x76, 0x5d, 0x59, 0x29, 0xcb, + 0xcd, 0x7a, 0xbf, 0x3e, 0xaf, 0x5f, 0xbf, 0x7e, 0xfd, 0xfa, 0x8d, 0x41, 0xfd, 0xe0, 0x39, 0x5a, + 0x32, 0xdd, 0x95, 0x83, 0x70, 0x8f, 0xf8, 0x0e, 0x09, 0x08, 0x5d, 0x69, 0x12, 0xc7, 0x70, 0xfd, + 0x15, 0xc9, 0xc0, 0x9e, 0x49, 0x8e, 0x02, 0xe2, 0x50, 0xd3, 0x75, 0xe8, 0x55, 0xec, 0x99, 0x94, + 0xf8, 0x4d, 0xe2, 0xaf, 0x78, 0x07, 0x0d, 0xc6, 0xa3, 0x9d, 0x02, 0x2b, 0xcd, 0xa7, 0xf6, 0x48, + 0x80, 0x9f, 0x5a, 0x69, 0x10, 0x87, 0xf8, 0x38, 0x20, 0x46, 0xc9, 0xf3, 0xdd, 0xc0, 0x85, 0x5f, + 0x11, 0xe6, 0x4a, 0x1d, 0xd2, 0x6f, 0x29, 0x73, 0x25, 0xef, 0xa0, 0xc1, 0x78, 0xb4, 0x53, 0xa0, + 0x24, 0xcd, 0x2d, 0x5c, 0x6d, 0x98, 0xc1, 0x7e, 0xb8, 0x57, 0xaa, 0xb9, 0xf6, 0x4a, 0xc3, 0x6d, + 0xb8, 0x2b, 0xdc, 0xea, 0x5e, 0x58, 0xe7, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0x40, 0x5b, 0x78, 0x3a, + 0x76, 0xde, 0xc6, 0xb5, 0x7d, 0xd3, 0x21, 0xfe, 0x71, 0xec, 0xb1, 0x4d, 0x02, 0xbc, 0xd2, 0xec, + 0xf2, 0x71, 0x61, 0xa5, 0x9f, 0x96, 0x1f, 0x3a, 0x81, 0x69, 0x93, 0x2e, 0x85, 0x2f, 0xdd, 0x4b, + 0x81, 0xd6, 0xf6, 0x89, 0x8d, 0xd3, 0x7a, 0xc5, 0x13, 0x0d, 0xcc, 0xae, 0xb9, 0x4e, 0x93, 0xf8, + 0x6c, 0x95, 0x88, 0xdc, 0x0e, 0x09, 0x0d, 0x60, 0x19, 0x64, 0x43, 0xd3, 0xd0, 0xb5, 0x25, 0x6d, + 0x79, 0xbc, 0xfc, 0xe4, 0x47, 0xad, 0xc2, 0x99, 0x76, 0xab, 0x90, 0xbd, 0xb9, 0xb9, 0x7e, 0xd2, + 0x2a, 0x5c, 0xea, 0x87, 0x14, 0x1c, 0x7b, 0x84, 0x96, 0x6e, 0x6e, 0xae, 0x23, 0xa6, 0x0c, 0x5f, + 0x04, 0xb3, 0x06, 0xa1, 0xa6, 0x4f, 0x8c, 0xd5, 0x9d, 0xcd, 0x57, 0x84, 0x7d, 0x3d, 0xc3, 0x2d, + 0x5e, 0x90, 0x16, 0x67, 0xd7, 0xd3, 0x02, 0xa8, 0x5b, 0x07, 0xee, 0x82, 0x31, 0x77, 0xef, 0x16, + 0xa9, 0x05, 0x54, 0xcf, 0x2e, 0x65, 0x97, 0x27, 0xae, 0x5d, 0x2d, 0xc5, 0x3b, 0xa8, 0x5c, 0xe0, + 0xdb, 0x26, 0x17, 0x5b, 0x42, 0xf8, 0x70, 0x23, 0xda, 0xb9, 0xf2, 0xb4, 0x44, 0x1b, 0xab, 0x08, + 0x2b, 0x28, 0x32, 0x57, 0xfc, 0x6d, 0x06, 0xc0, 0xe4, 0xe2, 0xa9, 0xe7, 0x3a, 0x94, 0x0c, 0x64, + 0xf5, 0x14, 0xcc, 0xd4, 0xb8, 0xe5, 0x80, 0x18, 0x12, 0x57, 0xcf, 0x7c, 0x16, 0xef, 0x75, 0x89, + 0x3f, 0xb3, 0x96, 0x32, 0x87, 0xba, 0x00, 0xe0, 0x0d, 0x30, 0xea, 0x13, 0x1a, 0x5a, 0x81, 0x9e, + 0x5d, 0xd2, 0x96, 0x27, 0xae, 0x5d, 0xe9, 0x0b, 0xc5, 0xf3, 0x9b, 0x25, 0x5f, 0xa9, 0xf9, 0x54, + 0xa9, 0x1a, 0xe0, 0x20, 0xa4, 0xe5, 0xb3, 0x12, 0x69, 0x14, 0x71, 0x1b, 0x48, 0xda, 0x2a, 0xfe, + 0x28, 0x03, 0x66, 0x92, 0x51, 0x6a, 0x9a, 0xe4, 0x10, 0x1e, 0x82, 0x31, 0x5f, 0x24, 0x0b, 0x8f, + 0xd3, 0xc4, 0xb5, 0x9d, 0xd2, 0x7d, 0x1d, 0xab, 0x52, 0x57, 0x12, 0x96, 0x27, 0xd8, 0x9e, 0xc9, + 0x1f, 0x28, 0x42, 0x83, 0xef, 0x80, 0xbc, 0x2f, 0x37, 0x8a, 0x67, 0xd3, 0xc4, 0xb5, 0xaf, 0x0f, + 0x10, 0x59, 0x18, 0x2e, 0x4f, 0xb6, 0x5b, 0x85, 0x7c, 0xf4, 0x0b, 0x29, 0xc0, 0xe2, 0xfb, 0x19, + 0xb0, 0xb8, 0x16, 0xd2, 0xc0, 0xb5, 0x11, 0xa1, 0x6e, 0xe8, 0xd7, 0xc8, 0x9a, 0x6b, 0x85, 0xb6, + 0xb3, 0x4e, 0xea, 0xa6, 0x63, 0x06, 0x2c, 0x5b, 0x97, 0xc0, 0x88, 0x83, 0x6d, 0x22, 0xb3, 0x67, + 0x52, 0xc6, 0x74, 0x64, 0x1b, 0xdb, 0x04, 0x71, 0x0e, 0x93, 0x60, 0xc9, 0x22, 0xcf, 0x82, 0x92, + 0xb8, 0x71, 0xec, 0x11, 0xc4, 0x39, 0xf0, 0x32, 0x18, 0xad, 0xbb, 0xbe, 0x8d, 0xc5, 0x3e, 0x8e, + 0xc7, 0x3b, 0xf3, 0x02, 0xa7, 0x22, 0xc9, 0x85, 0xcf, 0x80, 0x09, 0x83, 0xd0, 0x9a, 0x6f, 0x7a, + 0x0c, 0x5a, 0x1f, 0xe1, 0xc2, 0xe7, 0xa4, 0xf0, 0xc4, 0x7a, 0xcc, 0x42, 0x49, 0x39, 0x78, 0x05, + 0xe4, 0x3d, 0xdf, 0x74, 0x7d, 0x33, 0x38, 0xd6, 0x73, 0x4b, 0xda, 0x72, 0xae, 0x3c, 0x23, 0x75, + 0xf2, 0x3b, 0x92, 0x8e, 0x94, 0x04, 0x5c, 0x02, 0xf9, 0x97, 0xaa, 0x95, 0xed, 0x1d, 0x1c, 0xec, + 0xeb, 0xa3, 0x1c, 0x61, 0x84, 0x49, 0xa3, 0xfc, 0x2d, 0x49, 
0x2d, 0xfe, 0x3d, 0x03, 0xf4, 0x74, + 0x54, 0xa2, 0x90, 0xc2, 0x17, 0x40, 0x9e, 0x06, 0xac, 0xe2, 0x34, 0x8e, 0x65, 0x4c, 0x9e, 0x88, + 0xc0, 0xaa, 0x92, 0x7e, 0xd2, 0x2a, 0xcc, 0xc7, 0x1a, 0x11, 0x95, 0xc7, 0x43, 0xe9, 0xc2, 0x5f, + 0x6b, 0xe0, 0xdc, 0x21, 0xd9, 0xdb, 0x77, 0xdd, 0x83, 0x35, 0xcb, 0x24, 0x4e, 0xb0, 0xe6, 0x3a, + 0x75, 0xb3, 0x21, 0x73, 0x00, 0xdd, 0x67, 0x0e, 0xbc, 0xda, 0x6d, 0xb9, 0xfc, 0x50, 0xbb, 0x55, + 0x38, 0xd7, 0x83, 0x81, 0x7a, 0xf9, 0x01, 0x77, 0x81, 0x5e, 0x4b, 0x1d, 0x12, 0x59, 0xc0, 0x44, + 0xd9, 0x1a, 0x2f, 0x5f, 0x6c, 0xb7, 0x0a, 0xfa, 0x5a, 0x1f, 0x19, 0xd4, 0x57, 0xbb, 0xf8, 0x83, + 0x6c, 0x3a, 0xbc, 0x89, 0x74, 0x7b, 0x1b, 0xe4, 0xd9, 0x31, 0x36, 0x70, 0x80, 0xe5, 0x41, 0x7c, + 0xf2, 0x74, 0x87, 0x5e, 0xd4, 0x8c, 0x2d, 0x12, 0xe0, 0x32, 0x94, 0x1b, 0x02, 0x62, 0x1a, 0x52, + 0x56, 0xe1, 0xb7, 0xc1, 0x08, 0xf5, 0x48, 0x4d, 0x06, 0xfa, 0xb5, 0xfb, 0x3d, 0x6c, 0x7d, 0x16, + 0x52, 0xf5, 0x48, 0x2d, 0x3e, 0x0b, 0xec, 0x17, 0xe2, 0xb0, 0xf0, 0x5d, 0x0d, 0x8c, 0x52, 0x5e, + 0xa0, 0x64, 0x51, 0x7b, 0x63, 0x58, 0x1e, 0xa4, 0xaa, 0xa0, 0xf8, 0x8d, 0x24, 0x78, 0xf1, 0x5f, + 0x19, 0x70, 0xa9, 0x9f, 0xea, 0x9a, 0xeb, 0x18, 0x62, 0x3b, 0x36, 0xe5, 0xd9, 0x16, 0x99, 0xfe, + 0x4c, 0xf2, 0x6c, 0x9f, 0xb4, 0x0a, 0x8f, 0xdd, 0xd3, 0x40, 0xa2, 0x08, 0x7c, 0x59, 0xad, 0x5b, + 0x14, 0x8a, 0x4b, 0x9d, 0x8e, 0x9d, 0xb4, 0x0a, 0xd3, 0x4a, 0xad, 0xd3, 0x57, 0xd8, 0x04, 0xd0, + 0xc2, 0x34, 0xb8, 0xe1, 0x63, 0x87, 0x0a, 0xb3, 0xa6, 0x4d, 0x64, 0xf8, 0x9e, 0x38, 0x5d, 0x7a, + 0x30, 0x8d, 0xf2, 0x82, 0x84, 0x84, 0xd7, 0xbb, 0xac, 0xa1, 0x1e, 0x08, 0xac, 0x6e, 0xf9, 0x04, + 0x53, 0x55, 0x8a, 0x12, 0x37, 0x0a, 0xa3, 0x22, 0xc9, 0x85, 0x8f, 0x83, 0x31, 0x9b, 0x50, 0x8a, + 0x1b, 0x84, 0xd7, 0x9f, 0xf1, 0xf8, 0x8a, 0xde, 0x12, 0x64, 0x14, 0xf1, 0x59, 0x7f, 0x72, 0xb1, + 0x5f, 0xd4, 0xae, 0x9b, 0x34, 0x80, 0xaf, 0x77, 0x1d, 0x80, 0xd2, 0xe9, 0x56, 0xc8, 0xb4, 0x79, + 0xfa, 0xab, 0xe2, 0x17, 0x51, 0x12, 0xc9, 0xff, 0x2d, 0x90, 0x33, 0x03, 0x62, 0x47, 0x77, 0xf7, + 0xab, 0x43, 0xca, 0xbd, 0xf2, 0x94, 0xf4, 0x21, 0xb7, 0xc9, 0xd0, 0x90, 0x00, 0x2d, 0xfe, 0x2e, + 0x03, 0x1e, 0xe9, 0xa7, 0xc2, 0x2e, 0x14, 0xca, 0x22, 0xee, 0x59, 0xa1, 0x8f, 0x2d, 0x99, 0x71, + 0x2a, 0xe2, 0x3b, 0x9c, 0x8a, 0x24, 0x97, 0x95, 0x7c, 0x6a, 0x3a, 0x8d, 0xd0, 0xc2, 0xbe, 0x4c, + 0x27, 0xb5, 0xea, 0xaa, 0xa4, 0x23, 0x25, 0x01, 0x4b, 0x00, 0xd0, 0x7d, 0xd7, 0x0f, 0x38, 0x86, + 0xac, 0x5e, 0x67, 0x59, 0x81, 0xa8, 0x2a, 0x2a, 0x4a, 0x48, 0xb0, 0x1b, 0xed, 0xc0, 0x74, 0x0c, + 0xb9, 0xeb, 0xea, 0x14, 0xbf, 0x6c, 0x3a, 0x06, 0xe2, 0x1c, 0x86, 0x6f, 0x99, 0x34, 0x60, 0x14, + 0xb9, 0xe5, 0x1d, 0x51, 0xe7, 0x92, 0x4a, 0x82, 0xe1, 0xd7, 0x58, 0xd5, 0x77, 0x7d, 0x93, 0x50, + 0x7d, 0x34, 0xc6, 0x5f, 0x53, 0x54, 0x94, 0x90, 0x28, 0xfe, 0x33, 0xdf, 0x3f, 0x49, 0x58, 0x29, + 0x81, 0x8f, 0x82, 0x5c, 0xc3, 0x77, 0x43, 0x4f, 0x46, 0x49, 0x45, 0xfb, 0x45, 0x46, 0x44, 0x82, + 0xc7, 0xb2, 0xb2, 0xd9, 0xd1, 0xa6, 0xaa, 0xac, 0x8c, 0x9a, 0xd3, 0x88, 0x0f, 0xbf, 0xa7, 0x81, + 0x9c, 0x23, 0x83, 0xc3, 0x52, 0xee, 0xf5, 0x21, 0xe5, 0x05, 0x0f, 0x6f, 0xec, 0xae, 0x88, 0xbc, + 0x40, 0x86, 0x4f, 0x83, 0x1c, 0xad, 0xb9, 0x1e, 0x91, 0x51, 0x5f, 0x8c, 0x84, 0xaa, 0x8c, 0x78, + 0xd2, 0x2a, 0x4c, 0x45, 0xe6, 0x38, 0x01, 0x09, 0x61, 0xf8, 0x43, 0x0d, 0x80, 0x26, 0xb6, 0x4c, + 0x03, 0xf3, 0x96, 0x21, 0xc7, 0xdd, 0x1f, 0x6c, 0x5a, 0xbf, 0xa2, 0xcc, 0x8b, 0x4d, 0x8b, 0x7f, + 0xa3, 0x04, 0x34, 0x7c, 0x4f, 0x03, 0x93, 0x34, 0xdc, 0xf3, 0xa5, 0x16, 0xe5, 0xcd, 0xc5, 0xc4, + 0xb5, 0x6f, 0x0c, 0xd4, 0x97, 0x6a, 0x02, 0xa0, 0x3c, 0xd3, 0x6e, 0x15, 0x26, 0x93, 
0x14, 0xd4, + 0xe1, 0x00, 0xfc, 0x89, 0x06, 0xf2, 0xcd, 0xe8, 0xce, 0x1e, 0xe3, 0x07, 0xfe, 0xcd, 0x21, 0x6d, + 0xac, 0xcc, 0xa8, 0xf8, 0x14, 0xa8, 0x3e, 0x40, 0x79, 0x00, 0xff, 0xa2, 0x01, 0x1d, 0x1b, 0xa2, + 0xc0, 0x63, 0x6b, 0xc7, 0x37, 0x9d, 0x80, 0xf8, 0xa2, 0xdf, 0xa4, 0x7a, 0x9e, 0xbb, 0x37, 0xd8, + 0xbb, 0x30, 0xdd, 0xcb, 0x96, 0x97, 0xa4, 0x77, 0xfa, 0x6a, 0x1f, 0x37, 0x50, 0x5f, 0x07, 0x79, + 0xa2, 0xc5, 0x2d, 0x8d, 0x3e, 0x3e, 0x84, 0x44, 0x8b, 0x7b, 0x29, 0x59, 0x1d, 0xe2, 0x0e, 0x2a, + 0x01, 0x0d, 0x2b, 0x60, 0xce, 0xf3, 0x09, 0x07, 0xb8, 0xe9, 0x1c, 0x38, 0xee, 0xa1, 0xf3, 0x82, + 0x49, 0x2c, 0x83, 0xea, 0x60, 0x49, 0x5b, 0xce, 0x97, 0x2f, 0xb4, 0x5b, 0x85, 0xb9, 0x9d, 0x5e, + 0x02, 0xa8, 0xb7, 0x5e, 0xf1, 0xbd, 0x6c, 0xfa, 0x15, 0x90, 0xee, 0x22, 0xe0, 0x07, 0x62, 0xf5, + 0x22, 0x36, 0x54, 0xd7, 0xf8, 0x6e, 0xbd, 0x3d, 0xa4, 0x64, 0x52, 0x6d, 0x40, 0xdc, 0xc9, 0x29, + 0x12, 0x45, 0x09, 0x3f, 0xe0, 0x2f, 0x35, 0x30, 0x85, 0x6b, 0x35, 0xe2, 0x05, 0xc4, 0x10, 0xc5, + 0x3d, 0xf3, 0x39, 0xd4, 0xaf, 0x39, 0xe9, 0xd5, 0xd4, 0x6a, 0x12, 0x1a, 0x75, 0x7a, 0x02, 0x9f, + 0x07, 0x67, 0x69, 0xe0, 0xfa, 0xc4, 0x48, 0xb5, 0xcd, 0xb0, 0xdd, 0x2a, 0x9c, 0xad, 0x76, 0x70, + 0x50, 0x4a, 0xb2, 0xf8, 0xe9, 0x08, 0x28, 0xdc, 0xe3, 0xa8, 0x9d, 0xe2, 0x61, 0x76, 0x19, 0x8c, + 0xf2, 0xe5, 0x1a, 0x3c, 0x2a, 0xf9, 0x44, 0x2b, 0xc8, 0xa9, 0x48, 0x72, 0xd9, 0x45, 0xc1, 0xf0, + 0x59, 0xfb, 0x92, 0xe5, 0x82, 0xea, 0xa2, 0xa8, 0x0a, 0x32, 0x8a, 0xf8, 0xf0, 0x1d, 0x30, 0x2a, + 0x06, 0x2f, 0xbc, 0x4a, 0x0f, 0xb1, 0xd2, 0x02, 0xee, 0x27, 0x87, 0x42, 0x12, 0xb2, 0xbb, 0xc2, + 0xe6, 0x1e, 0x74, 0x85, 0xbd, 0x6b, 0x49, 0x1b, 0xfd, 0x1f, 0x2f, 0x69, 0xc5, 0x7f, 0x6b, 0xe9, + 0x73, 0x9f, 0x58, 0x6a, 0xb5, 0x86, 0x2d, 0x02, 0xd7, 0xc1, 0x0c, 0x7b, 0xb5, 0x20, 0xe2, 0x59, + 0x66, 0x0d, 0x53, 0xfe, 0x68, 0x16, 0x09, 0xa7, 0xe6, 0x38, 0xd5, 0x14, 0x1f, 0x75, 0x69, 0xc0, + 0x97, 0x00, 0x14, 0x9d, 0x7c, 0x87, 0x1d, 0xd1, 0x94, 0xa8, 0x9e, 0xbc, 0xda, 0x25, 0x81, 0x7a, + 0x68, 0xc1, 0x35, 0x30, 0x6b, 0xe1, 0x3d, 0x62, 0x55, 0x89, 0x45, 0x6a, 0x81, 0xeb, 0x73, 0x53, + 0x62, 0xac, 0x30, 0xd7, 0x6e, 0x15, 0x66, 0xaf, 0xa7, 0x99, 0xa8, 0x5b, 0xbe, 0x78, 0x29, 0x7d, + 0xbc, 0x92, 0x0b, 0x17, 0xef, 0xa3, 0x0f, 0x33, 0x60, 0xa1, 0x7f, 0x66, 0xc0, 0xef, 0xc7, 0xcf, + 0x38, 0xd1, 0xa5, 0xbf, 0x39, 0xac, 0x2c, 0x94, 0xef, 0x38, 0xd0, 0xfd, 0x86, 0x83, 0xdf, 0x61, + 0x2d, 0x13, 0xb6, 0xa2, 0xc1, 0xd1, 0x1b, 0x43, 0x73, 0x81, 0x81, 0x94, 0xc7, 0x45, 0x37, 0x86, + 0x2d, 0xde, 0x7c, 0x61, 0x8b, 0x14, 0x7f, 0xaf, 0xa5, 0x5f, 0xf2, 0xf1, 0x09, 0x86, 0x3f, 0xd5, + 0xc0, 0xb4, 0xeb, 0x11, 0x67, 0x75, 0x67, 0xf3, 0x95, 0x2f, 0x8a, 0x93, 0x2c, 0x43, 0xb5, 0x7d, + 0x9f, 0x7e, 0xbe, 0x54, 0xad, 0x6c, 0x0b, 0x83, 0x3b, 0xbe, 0xeb, 0xd1, 0xf2, 0xb9, 0x76, 0xab, + 0x30, 0x5d, 0xe9, 0x84, 0x42, 0x69, 0xec, 0xa2, 0x0d, 0xe6, 0x36, 0x8e, 0x02, 0xe2, 0x3b, 0xd8, + 0x5a, 0x77, 0x6b, 0xa1, 0x4d, 0x9c, 0x40, 0x38, 0x9a, 0x9a, 0x3a, 0x69, 0xa7, 0x9c, 0x3a, 0x3d, + 0x02, 0xb2, 0xa1, 0x6f, 0xc9, 0x2c, 0x9e, 0x50, 0x53, 0x55, 0x74, 0x1d, 0x31, 0x7a, 0xf1, 0x12, + 0x18, 0x61, 0x7e, 0xc2, 0x0b, 0x20, 0xeb, 0xe3, 0x43, 0x6e, 0x75, 0xb2, 0x3c, 0xc6, 0x44, 0x10, + 0x3e, 0x44, 0x8c, 0x56, 0xfc, 0x4f, 0x01, 0x4c, 0xa7, 0xd6, 0x02, 0x17, 0x40, 0x46, 0x8d, 0x6a, + 0x81, 0x34, 0x9a, 0xd9, 0x5c, 0x47, 0x19, 0xd3, 0x80, 0xcf, 0xaa, 0xe2, 0x2b, 0x40, 0x0b, 0xaa, + 0x9e, 0x73, 0x2a, 0xeb, 0x91, 0x63, 0x73, 0xcc, 0x91, 0xa8, 0x70, 0x32, 0x1f, 0x48, 0x5d, 0x9e, + 0x12, 0xe1, 0x03, 0xa9, 0x23, 0x46, 0xfb, 0xac, 0x23, 0xb7, 0x68, 0xe6, 0x97, 0x3b, 0xc5, 0xcc, + 0x6f, 0xf4, 
0xae, 0x33, 0xbf, 0x47, 0x41, 0x2e, 0x30, 0x03, 0x8b, 0xe8, 0x63, 0x9d, 0x4f, 0x99, + 0x1b, 0x8c, 0x88, 0x04, 0x0f, 0xde, 0x02, 0x63, 0x06, 0xa9, 0xe3, 0xd0, 0x0a, 0xf4, 0x3c, 0x4f, + 0xa1, 0xb5, 0x01, 0xa4, 0x90, 0x18, 0xc8, 0xae, 0x0b, 0xbb, 0x28, 0x02, 0x80, 0x8f, 0x81, 0x31, + 0x1b, 0x1f, 0x99, 0x76, 0x68, 0xf3, 0x26, 0x4f, 0x13, 0x62, 0x5b, 0x82, 0x84, 0x22, 0x1e, 0xab, + 0x8c, 0xe4, 0xa8, 0x66, 0x85, 0xd4, 0x6c, 0x12, 0xc9, 0x94, 0x0d, 0x98, 0xaa, 0x8c, 0x1b, 0x29, + 0x3e, 0xea, 0xd2, 0xe0, 0x60, 0xa6, 0xc3, 0x95, 0x27, 0x12, 0x60, 0x82, 0x84, 0x22, 0x5e, 0x27, + 0x98, 0x94, 0x9f, 0xec, 0x07, 0x26, 0x95, 0xbb, 0x34, 0xe0, 0x17, 0xc0, 0xb8, 0x8d, 0x8f, 0xae, + 0x13, 0xa7, 0x11, 0xec, 0xeb, 0x53, 0x4b, 0xda, 0x72, 0xb6, 0x3c, 0xd5, 0x6e, 0x15, 0xc6, 0xb7, + 0x22, 0x22, 0x8a, 0xf9, 0x5c, 0xd8, 0x74, 0xa4, 0xf0, 0xd9, 0x84, 0x70, 0x44, 0x44, 0x31, 0x9f, + 0x75, 0x10, 0x1e, 0x0e, 0xd8, 0xe1, 0xd2, 0xa7, 0x3b, 0x9f, 0x9a, 0x3b, 0x82, 0x8c, 0x22, 0x3e, + 0x5c, 0x06, 0x79, 0x1b, 0x1f, 0xf1, 0xb1, 0x80, 0x3e, 0xc3, 0xcd, 0xf2, 0xe1, 0xf4, 0x96, 0xa4, + 0x21, 0xc5, 0xe5, 0x92, 0xa6, 0x23, 0x24, 0x67, 0x13, 0x92, 0x92, 0x86, 0x14, 0x97, 0x25, 0x71, + 0xe8, 0x98, 0xb7, 0x43, 0x22, 0x84, 0x21, 0x8f, 0x8c, 0x4a, 0xe2, 0x9b, 0x31, 0x0b, 0x25, 0xe5, + 0xd8, 0xb3, 0xdc, 0x0e, 0xad, 0xc0, 0xf4, 0x2c, 0x52, 0xa9, 0xeb, 0xe7, 0x78, 0xfc, 0x79, 0xe3, + 0xbd, 0xa5, 0xa8, 0x28, 0x21, 0x01, 0x09, 0x18, 0x21, 0x4e, 0x68, 0xeb, 0xe7, 0xf9, 0xc5, 0x3e, + 0x90, 0x14, 0x54, 0x27, 0x67, 0xc3, 0x09, 0x6d, 0xc4, 0xcd, 0xc3, 0x67, 0xc1, 0x94, 0x8d, 0x8f, + 0x58, 0x39, 0x20, 0x7e, 0x60, 0x12, 0xaa, 0xcf, 0xf1, 0xc5, 0xcf, 0xb2, 0x8e, 0x73, 0x2b, 0xc9, + 0x40, 0x9d, 0x72, 0x5c, 0xd1, 0x74, 0x12, 0x8a, 0xf3, 0x09, 0xc5, 0x24, 0x03, 0x75, 0xca, 0xb1, + 0x48, 0xfb, 0xe4, 0x76, 0x68, 0xfa, 0xc4, 0xd0, 0x1f, 0xe2, 0x4d, 0xaa, 0xfc, 0x60, 0x20, 0x68, + 0x48, 0x71, 0x61, 0x33, 0x9a, 0x1f, 0xe9, 0xfc, 0x18, 0xde, 0x1c, 0x6c, 0x25, 0xaf, 0xf8, 0xab, + 0xbe, 0x8f, 0x8f, 0xc5, 0x4d, 0x93, 0x9c, 0x1c, 0x41, 0x0a, 0x72, 0xd8, 0xb2, 0x2a, 0x75, 0xfd, + 0x02, 0x8f, 0xfd, 0xa0, 0x6f, 0x10, 0x55, 0x75, 0x56, 0x19, 0x08, 0x12, 0x58, 0x0c, 0xd4, 0x75, + 0x58, 0x6a, 0x2c, 0x0c, 0x17, 0xb4, 0xc2, 0x40, 0x90, 0xc0, 0xe2, 0x2b, 0x75, 0x8e, 0x2b, 0x75, + 0xfd, 0xe1, 0x21, 0xaf, 0x94, 0x81, 0x20, 0x81, 0x05, 0x4d, 0x90, 0x75, 0xdc, 0x40, 0xbf, 0x38, + 0x94, 0xeb, 0x99, 0x5f, 0x38, 0xdb, 0x6e, 0x80, 0x18, 0x06, 0xfc, 0x85, 0x06, 0x80, 0x17, 0xa7, + 0xe8, 0x23, 0x03, 0x19, 0x4b, 0xa4, 0x20, 0x4b, 0x71, 0x6e, 0x6f, 0x38, 0x81, 0x7f, 0x1c, 0xbf, + 0x23, 0x13, 0x67, 0x20, 0xe1, 0x05, 0xfc, 0x8d, 0x06, 0xce, 0x27, 0xdb, 0x64, 0xe5, 0xde, 0x22, + 0x8f, 0xc8, 0x8d, 0x41, 0xa7, 0x79, 0xd9, 0x75, 0xad, 0xb2, 0xde, 0x6e, 0x15, 0xce, 0xaf, 0xf6, + 0x40, 0x45, 0x3d, 0x7d, 0x81, 0x7f, 0xd0, 0xc0, 0xac, 0xac, 0xa2, 0x09, 0x0f, 0x0b, 0x3c, 0x80, + 0x64, 0xd0, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x0f, 0xdd, 0x5d, 0x7c, 0xd4, 0xed, 0x1a, 0xfc, + 0xb3, 0x06, 0x26, 0x0d, 0xe2, 0x11, 0xc7, 0x20, 0x4e, 0x8d, 0xf9, 0xba, 0x34, 0x90, 0xb1, 0x41, + 0xda, 0xd7, 0xf5, 0x04, 0x84, 0x70, 0xb3, 0x24, 0xdd, 0x9c, 0x4c, 0xb2, 0x4e, 0x5a, 0x85, 0xf9, + 0x58, 0x35, 0xc9, 0x41, 0x1d, 0x5e, 0xc2, 0xf7, 0x35, 0x30, 0x1d, 0x6f, 0x80, 0xb8, 0x52, 0x2e, + 0x0d, 0x31, 0x0f, 0x78, 0xfb, 0xba, 0xda, 0x09, 0x88, 0xd2, 0x1e, 0xc0, 0x3f, 0x6a, 0xac, 0x53, + 0x8b, 0xde, 0x7d, 0x54, 0x2f, 0xf2, 0x58, 0xbe, 0x35, 0xf0, 0x58, 0x2a, 0x04, 0x11, 0xca, 0x2b, + 0x71, 0x2b, 0xa8, 0x38, 0x27, 0xad, 0xc2, 0x5c, 0x32, 0x92, 0x8a, 0x81, 0x92, 0x1e, 0xc2, 0x1f, + 0x6b, 0x60, 0x92, 0xc4, 0x1d, 0x37, 
0xd5, 0x1f, 0x1d, 0x48, 0x10, 0x7b, 0x36, 0xf1, 0xe2, 0xa5, + 0x9e, 0x60, 0x51, 0xd4, 0x81, 0xcd, 0x3a, 0x48, 0x72, 0x84, 0x6d, 0xcf, 0x22, 0xfa, 0xff, 0x0d, + 0xb8, 0x83, 0xdc, 0x10, 0x76, 0x51, 0x04, 0x00, 0xaf, 0x80, 0xbc, 0x13, 0x5a, 0x16, 0xde, 0xb3, + 0x88, 0xfe, 0x18, 0xef, 0x45, 0xd4, 0x58, 0x74, 0x5b, 0xd2, 0x91, 0x92, 0x80, 0x75, 0xb0, 0x74, + 0xf4, 0xb2, 0xfa, 0x17, 0xa1, 0x9e, 0x83, 0x3b, 0xfd, 0x32, 0xb7, 0xb2, 0xd0, 0x6e, 0x15, 0xe6, + 0x77, 0x7b, 0x8f, 0xf6, 0xee, 0x69, 0x03, 0xbe, 0x06, 0x1e, 0x4e, 0xc8, 0x6c, 0xd8, 0x7b, 0xc4, + 0x30, 0x88, 0x11, 0x3d, 0xdc, 0xf4, 0xff, 0x17, 0xc3, 0xc3, 0xe8, 0x80, 0xef, 0xa6, 0x05, 0xd0, + 0xdd, 0xb4, 0xe1, 0x75, 0x30, 0x9f, 0x60, 0x6f, 0x3a, 0x41, 0xc5, 0xaf, 0x06, 0xbe, 0xe9, 0x34, + 0xf4, 0x65, 0x6e, 0xf7, 0x7c, 0x74, 0x22, 0x77, 0x13, 0x3c, 0xd4, 0x47, 0x07, 0x7e, 0xad, 0xc3, + 0x1a, 0xff, 0x8c, 0x85, 0xbd, 0x97, 0xc9, 0x31, 0xd5, 0x1f, 0xe7, 0xdd, 0x09, 0xdf, 0xec, 0xdd, + 0x04, 0x1d, 0xf5, 0x91, 0x87, 0x5f, 0x05, 0xe7, 0x52, 0x1c, 0xf6, 0x44, 0xd1, 0x9f, 0x10, 0x6f, + 0x0d, 0xd6, 0xcf, 0xee, 0x46, 0x44, 0xd4, 0x4b, 0x72, 0x81, 0xbd, 0x62, 0x53, 0x55, 0x10, 0xce, + 0x80, 0xec, 0x01, 0x91, 0x5f, 0xff, 0x11, 0xfb, 0x13, 0x1a, 0x20, 0xd7, 0xc4, 0x56, 0x18, 0x3d, + 0xc4, 0x07, 0x7c, 0x83, 0x22, 0x61, 0xfc, 0xf9, 0xcc, 0x73, 0xda, 0xc2, 0x07, 0x1a, 0x98, 0xef, + 0x5d, 0x9c, 0x1f, 0xa8, 0x5b, 0xbf, 0xd2, 0xc0, 0x6c, 0x57, 0x1d, 0xee, 0xe1, 0xd1, 0xed, 0x4e, + 0x8f, 0x5e, 0x1b, 0x74, 0x41, 0x15, 0x09, 0xc4, 0xbb, 0xc8, 0xa4, 0x7b, 0x3f, 0xd3, 0xc0, 0x4c, + 0xba, 0xb4, 0x3d, 0xc8, 0x78, 0x15, 0x3f, 0xc8, 0x80, 0xf9, 0xde, 0xcd, 0x2f, 0xf4, 0xd5, 0x2b, + 0x7f, 0x38, 0xd3, 0x92, 0x5e, 0x93, 0xd5, 0x77, 0x35, 0x30, 0x71, 0x4b, 0xc9, 0x45, 0x5f, 0x87, + 0x07, 0x3e, 0xa7, 0x89, 0xee, 0x92, 0x98, 0x41, 0x51, 0x12, 0xb7, 0xf8, 0x27, 0x0d, 0xcc, 0xf5, + 0xbc, 0x24, 0xe1, 0x65, 0x30, 0x8a, 0x2d, 0xcb, 0x3d, 0x14, 0xe3, 0xb6, 0xc4, 0x2c, 0x7b, 0x95, + 0x53, 0x91, 0xe4, 0x26, 0xa2, 0x97, 0xf9, 0xbc, 0xa2, 0x57, 0xfc, 0xab, 0x06, 0x2e, 0xde, 0x2d, + 0x13, 0x1f, 0xc8, 0x96, 0x2e, 0x83, 0xbc, 0x6c, 0x70, 0x8f, 0xf9, 0x76, 0xca, 0x37, 0x9d, 0x2c, + 0x1a, 0xfc, 0x1f, 0xa2, 0xc4, 0x5f, 0xc5, 0x0f, 0x35, 0x30, 0x53, 0x25, 0x7e, 0xd3, 0xac, 0x11, + 0x44, 0xea, 0xc4, 0x27, 0x4e, 0x8d, 0xc0, 0x15, 0x30, 0xce, 0x3f, 0xcb, 0x7a, 0xb8, 0x16, 0x7d, + 0x62, 0x98, 0x95, 0x21, 0x1f, 0xdf, 0x8e, 0x18, 0x28, 0x96, 0x51, 0x9f, 0x23, 0x32, 0x7d, 0x3f, + 0x47, 0x5c, 0x04, 0x23, 0x5e, 0x3c, 0xac, 0xcd, 0x33, 0x2e, 0x9f, 0xcf, 0x72, 0x2a, 0xe7, 0xba, + 0x7e, 0xc0, 0x27, 0x50, 0x39, 0xc9, 0x75, 0xfd, 0x00, 0x71, 0x6a, 0xf1, 0x6f, 0x1a, 0xe8, 0xf5, + 0xaf, 0x4b, 0xf0, 0x82, 0x18, 0xc2, 0x25, 0x26, 0x5b, 0xd1, 0x00, 0x0e, 0x36, 0xc1, 0x18, 0x15, + 0xab, 0x92, 0x51, 0xaf, 0xdc, 0x67, 0xd4, 0xd3, 0x31, 0x12, 0xb7, 0x7f, 0x44, 0x8d, 0xc0, 0x58, + 0xe0, 0x6b, 0xb8, 0x1c, 0x3a, 0x86, 0x9c, 0xcb, 0x4e, 0x8a, 0xc0, 0xaf, 0xad, 0x0a, 0x1a, 0x52, + 0xdc, 0xf2, 0xd5, 0x8f, 0xee, 0x2c, 0x9e, 0xf9, 0xf8, 0xce, 0xe2, 0x99, 0x4f, 0xee, 0x2c, 0x9e, + 0xf9, 0x6e, 0x7b, 0x51, 0xfb, 0xa8, 0xbd, 0xa8, 0x7d, 0xdc, 0x5e, 0xd4, 0x3e, 0x69, 0x2f, 0x6a, + 0xff, 0x68, 0x2f, 0x6a, 0x3f, 0xff, 0x74, 0xf1, 0xcc, 0x37, 0xc7, 0x24, 0xfe, 0x7f, 0x03, 0x00, + 0x00, 0xff, 0xff, 0x80, 0x3e, 0x52, 0x72, 0x50, 0x2c, 0x00, 0x00, +} + func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -239,37 +955,46 @@ func (m *ConversionRequest) Marshal() (dAtA 
[]byte, err error) { } func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) - i += copy(dAtA[i:], m.DesiredAPIVersion) if len(m.Objects) > 0 { - for _, msg := range m.Objects { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -277,41 +1002,51 @@ func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { } func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.ConvertedObjects) > 0 { - for _, msg := range m.ConvertedObjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n1, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n1 - return i, nil + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ConversionReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -319,37 +1054,46 @@ func (m *ConversionReview) Marshal() (dAtA []byte, err error) { } func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) - n2, err := m.Request.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) - n3, err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -357,40 +1101,50 @@ func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i += copy(dAtA[i:], m.Format) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) - dAtA[i] = 0x32 - i++ + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) - i += copy(dAtA[i:], m.JSONPath) - return i, nil + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -398,46 +1152,48 @@ func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - 
dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i += copy(dAtA[i:], m.Strategy) - if m.WebhookClientConfig != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.WebhookClientConfig.Size())) - n4, err := m.WebhookClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } if len(m.ConversionReviewVersions) > 0 { - for _, s := range m.ConversionReviewVersions { + for iNdEx := len(m.ConversionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if m.WebhookClientConfig != nil { + { + size, err := m.WebhookClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -445,41 +1201,52 @@ func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -487,41 +1254,52 @@ func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -529,37 +1307,46 @@ func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -567,63 +1354,60 @@ func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int 
_ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) - i += copy(dAtA[i:], m.Plural) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) - i += copy(dAtA[i:], m.Singular) - if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x2a - i++ + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) - i += copy(dAtA[i:], m.ListKind) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x2a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -631,91 +1415,121 @@ func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Names.Size())) - n10, err := m.Names.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreserveUnknownFields != nil { + i-- + if *m.PreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) - i += copy(dAtA[i:], m.Scope) - if m.Validation != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Validation.Size())) - n11, err := m.Validation.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- + dAtA[i] = 0x4a } - if m.Subresources != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) - n12, err := m.Subresources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - i += n12 } if len(m.Versions) > 0 { - for _, msg := range m.Versions { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - if len(m.AdditionalPrinterColumns) > 0 { - for _, msg := range m.AdditionalPrinterColumns { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Validation != nil { + { + size, err := m.Validation.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.Conversion != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Conversion.Size())) - n13, err := m.Conversion.MarshalTo(dAtA[i:]) + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n13 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -723,122 +1537,137 @@ func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcceptedNames.Size())) - n14, err := m.AcceptedNames.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - 
} - i += n14 if len(m.StoredVersions) > 0 { - for _, s := range m.StoredVersions { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil -} - -func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } return dAtA[:n], nil } func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Served { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x18 - i++ - if m.Storage { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Schema != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n15, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n15 } if m.Subresources != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) - n16, err := m.Subresources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x2a } - if len(m.AdditionalPrinterColumns) > 0 { - for _, msg := range m.AdditionalPrinterColumns { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 
+ } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -846,31 +1675,39 @@ func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) - i += copy(dAtA[i:], m.SpecReplicasPath) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) - i += copy(dAtA[i:], m.StatusReplicasPath) if m.LabelSelectorPath != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) - i += copy(dAtA[i:], *m.LabelSelectorPath) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -878,17 +1715,22 @@ func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -896,37 +1738,46 @@ func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Status != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n17, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Scale.Size())) - n18, err := m.Scale.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err 
+ } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -934,27 +1785,34 @@ func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.OpenAPIV3Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.OpenAPIV3Schema.Size())) - n19, err := m.OpenAPIV3Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -962,25 +1820,32 @@ func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { } func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x12 - i++ + i -= len(m.URL) + copy(dAtA[i:], m.URL) i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i += copy(dAtA[i:], m.URL) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JSON) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -988,23 +1853,29 @@ func (m *JSON) Marshal() (dAtA []byte, err error) { } func (m *JSON) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1012,234 +1883,225 @@ func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) - i += copy(dAtA[i:], m.Schema) - if m.Ref != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) - i += copy(dAtA[i:], *m.Ref) - } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i += copy(dAtA[i:], m.Format) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) - i += copy(dAtA[i:], m.Title) - if m.Default != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Default.Size())) - n20, err := m.Default.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca } - i += n20 } - if m.Maximum != nil { - dAtA[i] = 0x49 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) - i += 8 - } - dAtA[i] = 0x50 - i++ - if m.ExclusiveMaximum { + i-- + if m.XIntOrString { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.Minimum != nil { - dAtA[i] = 0x59 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) - i += 8 - } - dAtA[i] = 0x60 - i++ - if m.ExclusiveMinimum { + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + i-- + if m.XEmbeddedResource { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.MaxLength != nil { - dAtA[i] = 0x68 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) - } - if m.MinLength != nil { - dAtA[i] = 0x70 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) - } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) - i += copy(dAtA[i:], m.Pattern) - if m.MaxItems != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) - } - if m.MinItems != nil { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + if m.XPreserveUnknownFields != nil { + i-- + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 } - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - if m.UniqueItems { + i-- + if m.Nullable { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.MultipleOf != nil { - dAtA[i] = 0x99 - i++ - dAtA[i] = 0x1 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) - i += 8 - } - if len(m.Enum) > 0 { - for _, msg := range m.Enum { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + if m.Example != nil { + { + size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 } - if m.MaxProperties != nil { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) - } - if m.MinProperties != nil { - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) - } - if len(m.Required) > 0 { - for _, s := range m.Required { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ExternalDocs != nil { + { + size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a } - if m.Items != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Items.Size())) - n21, err := m.Items.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Definitions) > 0 { + keysForDefinitions := make([]string, 0, len(m.Definitions)) + for k := range m.Definitions { + keysForDefinitions = append(keysForDefinitions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Definitions[string(keysForDefinitions[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefinitions[iNdEx]) + copy(dAtA[i:], keysForDefinitions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } - i += n21 } - if len(m.AllOf) > 0 { - for _, msg := range m.AllOf { - dAtA[i] = 0xca - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AdditionalItems != nil { + { + size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if len(m.OneOf) > 0 { - for _, msg := range m.OneOf { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.Dependencies) > 0 { + keysForDependencies := make([]string, 0, len(m.Dependencies)) + for k := range m.Dependencies { + keysForDependencies = append(keysForDependencies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { + v := m.Dependencies[string(keysForDependencies[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + i -= len(keysForDependencies[iNdEx]) + copy(dAtA[i:], keysForDependencies[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 } } - if len(m.AnyOf) > 0 { - for _, msg := range m.AnyOf { - dAtA[i] = 0xda - i++ + if len(m.PatternProperties) > 0 { + keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) + for k := range m.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPatternProperties[iNdEx]) + copy(dAtA[i:], keysForPatternProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0xfa + } + } + if m.AdditionalProperties != nil { + { + size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.Not != nil { - dAtA[i] = 0xe2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Not.Size())) - n22, err := m.Not.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 + i-- + dAtA[i] = 0xf2 } if len(m.Properties) > 0 { keysForProperties := make([]string, 0, len(m.Properties)) @@ -1247,197 +2109,279 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { keysForProperties = append(keysForProperties, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) - for _, k := range keysForProperties { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x1 - i++ - v := m.Properties[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.Properties[string(keysForProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n23, err := (&v).MarshalTo(dAtA[i:]) + i -= len(keysForProperties[iNdEx]) + copy(dAtA[i:], keysForProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + if m.Not != nil { + { + size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n23 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AdditionalProperties != nil { - dAtA[i] = 0xf2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.AdditionalProperties.Size())) - n24, err := m.AdditionalProperties.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 + i-- + dAtA[i] = 0xe2 } - if len(m.PatternProperties) > 0 { - keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) - for k := range m.PatternProperties { - keysForPatternProperties = append(keysForPatternProperties, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) - for _, k := range keysForPatternProperties { - dAtA[i] = 0xfa - i++ - dAtA[i] = 0x1 - i++ - v := m.PatternProperties[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n25, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } } - if len(m.Dependencies) > 0 { - keysForDependencies := make([]string, 0, len(m.Dependencies)) - for k := range m.Dependencies { - keysForDependencies = append(keysForDependencies, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) - for _, k := range keysForDependencies { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x2 - i++ - v := m.Dependencies[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n26, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.OneOf) > 0 { + for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } } - if m.AdditionalItems != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalItems.Size())) - n27, err := m.AdditionalItems.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } - i += n27 } - if len(m.Definitions) > 0 { - keysForDefinitions := make([]string, 0, len(m.Definitions)) - for k := range m.Definitions { - keysForDefinitions = append(keysForDefinitions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) - for _, k := range keysForDefinitions { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x2 - i++ - v := m.Definitions[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + 
sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n28, err := (&v).MarshalTo(dAtA[i:]) + if m.Items != nil { + { + size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if m.ExternalDocs != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExternalDocs.Size())) - n29, err := m.ExternalDocs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Required) > 0 { + for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Required[iNdEx]) + copy(dAtA[i:], m.Required[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - i += n29 } - if m.Example != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Example.Size())) - n30, err := m.Example.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MinProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.MaxProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - i += n30 } - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x2 - i++ - if m.Nullable { + if m.MultipleOf != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x99 + } + i-- + if m.UniqueItems { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + if m.MinItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.MaxItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0x7a + if m.MinLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + i-- + dAtA[i] = 0x70 + } + if m.MaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + i-- + dAtA[i] = 0x68 + } + i-- + if m.ExclusiveMinimum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + if m.Minimum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i-- + dAtA[i] = 0x59 + } + i-- + if m.ExclusiveMaximum { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x50 + if m.Maximum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i-- + dAtA[i] = 0x49 + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0x3a + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x32 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + if m.Ref != nil { + i -= len(*m.Ref) + copy(dAtA[i:], *m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1445,39 +2389,48 @@ func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n31, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if len(m.JSONSchemas) > 0 { - for _, msg := range m.JSONSchemas { + for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1485,35 +2438,42 @@ func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- if m.Allows { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.Schema != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n32, err := 
m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1521,42 +2481,43 @@ func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n33, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } if len(m.Property) > 0 { - for _, s := range m.Property { + for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Property[iNdEx]) + copy(dAtA[i:], m.Property[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1564,31 +2525,44 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } if m.Path != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1596,45 +2570,59 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n34, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } if m.CABundle != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + i-- + dAtA[i] = 0x12 } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ConversionRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -1651,6 +2639,9 @@ func (m *ConversionRequest) Size() (n int) { } func (m *ConversionResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -1667,6 +2658,9 @@ func (m *ConversionResponse) Size() (n int) { } func (m *ConversionReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -1681,6 +2675,9 @@ func (m *ConversionReview) Size() (n int) { } func (m *CustomResourceColumnDefinition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1698,6 +2695,9 @@ func (m *CustomResourceColumnDefinition) Size() (n int) { } func (m *CustomResourceConversion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Strategy) @@ -1716,6 +2716,9 @@ func (m *CustomResourceConversion) Size() (n int) { } func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1728,6 +2731,9 @@ func (m *CustomResourceDefinition) Size() (n int) { } func (m *CustomResourceDefinitionCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1744,6 +2750,9 @@ func (m *CustomResourceDefinitionCondition) Size() (n int) { } func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1758,6 +2767,9 @@ func (m *CustomResourceDefinitionList) Size() (n int) { } func (m *CustomResourceDefinitionNames) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Plural) @@ -1784,6 +2796,9 @@ func (m *CustomResourceDefinitionNames) Size() (n int) { } func (m *CustomResourceDefinitionSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -1818,10 +2833,16 @@ func (m *CustomResourceDefinitionSpec) Size() (n int) { l = m.Conversion.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.PreserveUnknownFields != nil { + n += 2 + } return n } func (m *CustomResourceDefinitionStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -1842,6 +2863,9 @@ func (m 
*CustomResourceDefinitionStatus) Size() (n int) { } func (m *CustomResourceDefinitionVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1866,6 +2890,9 @@ func (m *CustomResourceDefinitionVersion) Size() (n int) { } func (m *CustomResourceSubresourceScale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SpecReplicasPath) @@ -1880,12 +2907,18 @@ func (m *CustomResourceSubresourceScale) Size() (n int) { } func (m *CustomResourceSubresourceStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *CustomResourceSubresources) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Status != nil { @@ -1900,6 +2933,9 @@ func (m *CustomResourceSubresources) Size() (n int) { } func (m *CustomResourceValidation) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.OpenAPIV3Schema != nil { @@ -1910,6 +2946,9 @@ func (m *CustomResourceValidation) Size() (n int) { } func (m *ExternalDocumentation) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Description) @@ -1920,6 +2959,9 @@ func (m *ExternalDocumentation) Size() (n int) { } func (m *JSON) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -1930,6 +2972,9 @@ func (m *JSON) Size() (n int) { } func (m *JSONSchemaProps) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -2075,10 +3120,28 @@ func (m *JSONSchemaProps) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } n += 3 + if m.XPreserveUnknownFields != nil { + n += 3 + } + n += 3 + n += 3 + if len(m.XListMapKeys) > 0 { + for _, s := range m.XListMapKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.XListType != nil { + l = len(*m.XListType) + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *JSONSchemaPropsOrArray) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Schema != nil { @@ -2095,6 +3158,9 @@ func (m *JSONSchemaPropsOrArray) Size() (n int) { } func (m *JSONSchemaPropsOrBool) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -2106,6 +3172,9 @@ func (m *JSONSchemaPropsOrBool) Size() (n int) { } func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Schema != nil { @@ -2122,6 +3191,9 @@ func (m *JSONSchemaPropsOrStringArray) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -2132,10 +3204,16 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } return n } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Service != nil { @@ -2154,14 +3232,7 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2170,10 +3241,15 @@ func (this *ConversionRequest) String() string { if this == nil { return "nil" } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" s := strings.Join([]string{`&ConversionRequest{`, `UID:` + fmt.Sprintf("%v", this.UID) + 
`,`, `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, - `Objects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Objects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Objects:` + repeatedStringForObjects + `,`, `}`, }, "") return s @@ -2182,10 +3258,15 @@ func (this *ConversionResponse) String() string { if this == nil { return "nil" } + repeatedStringForConvertedObjects := "[]RawExtension{" + for _, f := range this.ConvertedObjects { + repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConvertedObjects += "}" s := strings.Join([]string{`&ConversionResponse{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ConvertedObjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConvertedObjects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(strings.Replace(this.Result.String(), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1), `&`, ``, 1) + `,`, + `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2195,8 +3276,8 @@ func (this *ConversionReview) String() string { return "nil" } s := strings.Join([]string{`&ConversionReview{`, - `Request:` + strings.Replace(fmt.Sprintf("%v", this.Request), "ConversionRequest", "ConversionRequest", 1) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "ConversionResponse", "ConversionResponse", 1) + `,`, + `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, `}`, }, "") return s @@ -2222,7 +3303,7 @@ func (this *CustomResourceConversion) String() string { } s := strings.Join([]string{`&CustomResourceConversion{`, `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, - `WebhookClientConfig:` + strings.Replace(fmt.Sprintf("%v", this.WebhookClientConfig), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `WebhookClientConfig:` + strings.Replace(this.WebhookClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, `}`, }, "") @@ -2233,7 +3314,7 @@ func (this *CustomResourceDefinition) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceDefinition{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", "CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2247,7 +3328,7 @@ func (this *CustomResourceDefinitionCondition) String() string { s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), 
"Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2258,9 +3339,14 @@ func (this *CustomResourceDefinitionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CustomResourceDefinitionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2284,16 +3370,27 @@ func (this *CustomResourceDefinitionSpec) String() string { if this == nil { return "nil" } + repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, - `Validation:` + strings.Replace(fmt.Sprintf("%v", this.Validation), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + `,`, - `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, - `Conversion:` + strings.Replace(fmt.Sprintf("%v", this.Conversion), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `Validation:` + strings.Replace(this.Validation.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + 
`AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `PreserveUnknownFields:` + valueToStringGenerated(this.PreserveUnknownFields) + `,`, `}`, }, "") return s @@ -2302,8 +3399,13 @@ func (this *CustomResourceDefinitionStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, `StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, `}`, @@ -2314,13 +3416,18 @@ func (this *CustomResourceDefinitionVersion) String() string { if this == nil { return "nil" } + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Served:` + fmt.Sprintf("%v", this.Served) + `,`, `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, `}`, }, "") return s @@ -2351,8 +3458,8 @@ func (this *CustomResourceSubresources) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceSubresources{`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, - `Scale:` + strings.Replace(fmt.Sprintf("%v", this.Scale), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, + `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, + `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, `}`, }, "") return s @@ -2362,7 
+3469,7 @@ func (this *CustomResourceValidation) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceValidation{`, - `OpenAPIV3Schema:` + strings.Replace(fmt.Sprintf("%v", this.OpenAPIV3Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `}`, }, "") return s @@ -2392,6 +3499,26 @@ func (this *JSONSchemaProps) String() string { if this == nil { return "nil" } + repeatedStringForEnum := "[]JSON{" + for _, f := range this.Enum { + repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," + } + repeatedStringForEnum += "}" + repeatedStringForAllOf := "[]JSONSchemaProps{" + for _, f := range this.AllOf { + repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAllOf += "}" + repeatedStringForOneOf := "[]JSONSchemaProps{" + for _, f := range this.OneOf { + repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForOneOf += "}" + repeatedStringForAnyOf := "[]JSONSchemaProps{" + for _, f := range this.AnyOf { + repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAnyOf += "}" keysForProperties := make([]string, 0, len(this.Properties)) for k := range this.Properties { keysForProperties = append(keysForProperties, k) @@ -2440,7 +3567,7 @@ func (this *JSONSchemaProps) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Format:` + fmt.Sprintf("%v", this.Format) + `,`, `Title:` + fmt.Sprintf("%v", this.Title) + `,`, - `Default:` + strings.Replace(fmt.Sprintf("%v", this.Default), "JSON", "JSON", 1) + `,`, + `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, @@ -2452,24 +3579,29 @@ func (this *JSONSchemaProps) String() string { `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, - `Enum:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Enum), "JSON", "JSON", 1), `&`, ``, 1) + `,`, + `Enum:` + repeatedStringForEnum + `,`, `MaxProperties:` + valueToStringGenerated(this.MaxProperties) + `,`, `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, `Required:` + fmt.Sprintf("%v", this.Required) + `,`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, - `AllOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `OneOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OneOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `AnyOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AnyOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `Not:` + strings.Replace(fmt.Sprintf("%v", this.Not), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, + `AllOf:` + 
repeatedStringForAllOf + `,`, + `OneOf:` + repeatedStringForOneOf + `,`, + `AnyOf:` + repeatedStringForAnyOf + `,`, + `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `Properties:` + mapStringForProperties + `,`, - `AdditionalProperties:` + strings.Replace(fmt.Sprintf("%v", this.AdditionalProperties), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, `PatternProperties:` + mapStringForPatternProperties + `,`, `Dependencies:` + mapStringForDependencies + `,`, - `AdditionalItems:` + strings.Replace(fmt.Sprintf("%v", this.AdditionalItems), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, `Definitions:` + mapStringForDefinitions + `,`, - `ExternalDocs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalDocs), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, - `Example:` + strings.Replace(fmt.Sprintf("%v", this.Example), "JSON", "JSON", 1) + `,`, + `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, + `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, + `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, + `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, + `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, + `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, + `XListType:` + valueToStringGenerated(this.XListType) + `,`, `}`, }, "") return s @@ -2478,9 +3610,14 @@ func (this *JSONSchemaPropsOrArray) String() string { if this == nil { return "nil" } + repeatedStringForJSONSchemas := "[]JSONSchemaProps{" + for _, f := range this.JSONSchemas { + repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForJSONSchemas += "}" s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `JSONSchemas:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.JSONSchemas), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, `}`, }, "") return s @@ -2491,7 +3628,7 @@ func (this *JSONSchemaPropsOrBool) String() string { } s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `}`, }, "") return s @@ -2501,7 +3638,7 @@ func (this *JSONSchemaPropsOrStringArray) String() string { return "nil" } s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `Property:` + fmt.Sprintf("%v", this.Property) + `,`, `}`, }, "") @@ 
-2515,6 +3652,7 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, `}`, }, "") return s @@ -2524,7 +3662,7 @@ func (this *WebhookClientConfig) String() string { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -2554,7 +3692,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2582,7 +3720,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2592,6 +3730,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2611,7 +3752,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2621,6 +3762,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2640,7 +3784,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2649,10 +3793,13 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Objects = append(m.Objects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Objects = append(m.Objects, runtime.RawExtension{}) if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2666,6 +3813,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2693,7 +3843,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2721,7 +3871,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2731,6 +3881,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2750,7 +3903,7 @@ func (m 
*ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2759,10 +3912,13 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConvertedObjects = append(m.ConvertedObjects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2781,7 +3937,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2790,6 +3946,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2806,6 +3965,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2833,7 +3995,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2861,7 +4023,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2870,6 +4032,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2894,7 +4059,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2903,6 +4068,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2922,6 +4090,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2949,7 +4120,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2977,7 +4148,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2987,6 +4158,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3006,7 +4180,7 @@ func (m *CustomResourceColumnDefinition) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3016,6 +4190,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3035,7 +4212,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3045,6 +4222,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3064,7 +4244,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3074,6 +4254,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3093,7 +4276,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Priority |= (int32(b) & 0x7F) << shift + m.Priority |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3112,7 +4295,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3122,6 +4305,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3136,6 +4322,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3163,7 +4352,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3191,7 +4380,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3201,6 +4390,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3220,7 +4412,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3229,6 +4421,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -3253,7 +4448,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3263,6 +4458,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3277,6 +4475,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3304,7 +4505,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3332,7 +4533,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3341,6 +4542,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3362,7 +4566,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3371,6 +4575,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3392,7 +4599,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3401,6 +4608,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3417,6 +4627,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3444,7 +4657,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3472,7 +4685,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3482,6 +4695,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3501,7 +4717,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << 
shift if b < 0x80 { break } @@ -3511,6 +4727,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3530,7 +4749,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3539,6 +4758,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3560,7 +4782,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3570,6 +4792,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3589,7 +4814,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3599,6 +4824,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3613,6 +4841,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3640,7 +4871,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3668,7 +4899,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3677,6 +4908,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3698,7 +4932,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3707,6 +4941,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3724,6 +4961,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3751,7 +4991,7 @@ func (m *CustomResourceDefinitionNames) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3779,7 +5019,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3789,6 +5029,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3808,7 +5051,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3818,6 +5061,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3837,7 +5083,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3847,6 +5093,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3866,7 +5115,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3876,6 +5125,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3895,7 +5147,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3905,6 +5157,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3924,7 +5179,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3934,6 +5189,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3948,6 +5206,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3975,7 +5236,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4003,7 +5264,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4013,6 +5274,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4032,7 +5296,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4042,6 +5306,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4061,7 +5328,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4070,6 +5337,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4091,7 +5361,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4101,6 +5371,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4120,7 +5393,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4129,6 +5402,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4153,7 +5429,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4162,6 +5438,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4186,7 +5465,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4195,6 +5474,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4217,7 +5499,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4226,6 +5508,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4248,7 +5533,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4257,6 +5542,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4267,6 +5555,27 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PreserveUnknownFields = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4276,6 +5585,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4303,7 +5615,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4331,7 +5643,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4340,6 +5652,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4362,7 +5677,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4371,6 +5686,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4392,7 +5710,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4402,6 +5720,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4416,6 +5737,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4443,7 +5767,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4471,7 +5795,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4481,6 +5805,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4500,7 +5827,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4520,7 +5847,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4540,7 +5867,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4549,6 +5876,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4573,7 +5903,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4582,6 +5912,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4606,7 +5939,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4615,6 +5948,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4632,6 +5968,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4659,7 +5998,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4687,7 +6026,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4697,6 +6036,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4716,7 +6058,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4726,6 +6068,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4745,7 +6090,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4755,6 +6100,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4770,6 +6118,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4797,7 +6148,7 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4820,6 +6171,9 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4847,7 +6201,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4875,7 +6229,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4884,6 +6238,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4908,7 +6265,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4917,6 +6274,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4936,6 +6296,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4963,7 +6326,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4991,7 +6354,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5000,6 +6363,9 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5019,6 +6385,9 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5046,7 +6415,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5074,7 +6443,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5084,6 +6453,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5103,7 +6475,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5113,6 +6485,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5127,6 +6502,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5154,7 +6532,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5182,7 +6560,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5191,6 +6569,9 @@ func (m *JSON) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5208,6 +6589,9 @@ func (m *JSON) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5235,7 +6619,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5263,7 +6647,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5273,6 +6657,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -5292,7 +6679,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5302,6 +6689,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5321,7 +6711,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5331,6 +6721,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5351,7 +6744,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5361,6 +6754,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5380,7 +6776,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5390,6 +6786,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5409,7 +6808,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5419,6 +6818,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5438,7 +6840,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5448,6 +6850,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5467,7 +6872,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5476,6 +6881,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5512,7 +6920,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5544,7 +6952,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5564,7 +6972,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5584,7 +6992,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5604,7 +7012,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5614,6 +7022,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5633,7 +7044,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5653,7 +7064,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5673,7 +7084,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5705,7 +7116,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5714,6 +7125,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5736,7 +7150,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5756,7 +7170,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5776,7 +7190,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5786,6 +7200,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5805,7 +7222,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5814,6 +7231,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5838,7 +7258,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5847,6 +7267,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5869,7 +7292,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5878,6 +7301,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5900,7 +7326,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5909,6 +7335,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5931,7 +7360,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5940,6 +7369,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5964,7 +7396,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5973,6 +7405,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5993,7 +7428,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6010,7 +7445,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6020,6 +7455,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6036,7 +7474,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6045,7 +7483,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6087,7 +7525,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6096,6 +7534,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -6120,7 +7561,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6129,6 +7570,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6149,7 +7593,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6166,7 +7610,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6176,6 +7620,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6192,7 +7639,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6201,7 +7648,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6243,7 +7690,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6252,6 +7699,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6272,7 +7722,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6289,7 +7739,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6299,6 +7749,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6315,7 +7768,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6324,7 +7777,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6366,7 +7819,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6375,6 +7828,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6399,7 +7855,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6408,6 +7864,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6428,7 +7887,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6445,7 +7904,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6455,6 +7914,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6471,7 +7933,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6480,7 +7942,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6522,7 +7984,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6531,6 +7993,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6555,7 +8020,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6564,6 +8029,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6588,12 +8056,138 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Nullable = bool(v != 0) + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.XPreserveUnknownFields = &b + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XEmbeddedResource = bool(v != 0) + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XIntOrString = bool(v != 0) + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XListType = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6603,6 +8197,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6630,7 +8227,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6658,7 +8255,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6667,6 +8264,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6691,7 +8291,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6700,6 +8300,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6717,6 +8320,9 @@ func (m *JSONSchemaPropsOrArray) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6744,7 +8350,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6772,7 +8378,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6792,7 +8398,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6801,6 +8407,9 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6820,6 +8429,9 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6847,7 +8459,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6875,7 +8487,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6884,6 +8496,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6908,7 +8523,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6918,6 +8533,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6932,6 +8550,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6959,7 +8580,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6987,7 +8608,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6997,6 +8618,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7016,7 +8640,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7026,6 +8650,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7045,7 +8672,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7055,12 +8682,35 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.Path = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7070,6 +8720,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7097,7 +8750,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7125,7 +8778,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7134,6 +8787,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7158,7 +8814,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7167,6 +8823,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7189,7 +8848,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7199,6 +8858,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7214,6 +8876,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7280,10 +8945,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { 
return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -7312,6 +8980,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -7330,185 +9001,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2783 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xdf, 0x6f, 0x23, 0x57, - 0xf5, 0xdf, 0xb1, 0xe3, 0xc4, 0xb9, 0x49, 0x36, 0xc9, 0xdd, 0x66, 0x3b, 0x9b, 0x6f, 0x6a, 0x27, - 0xee, 0xb7, 0x55, 0x28, 0xbb, 0x4e, 0xbb, 0xb4, 0xb4, 0x54, 0xe2, 0x21, 0x76, 0xd2, 0x2a, 0x65, - 0xf3, 0x83, 0xeb, 0xdd, 0xb6, 0xd0, 0x9f, 0x37, 0xe3, 0x6b, 0x67, 0x36, 0xf3, 0x6b, 0xe7, 0xce, - 0x38, 0x89, 0x0a, 0x08, 0xa8, 0x2a, 0x10, 0x02, 0x8a, 0xe8, 0xbe, 0x20, 0xe0, 0x01, 0x10, 0x2f, - 0x3c, 0xc0, 0x03, 0xbc, 0xc1, 0x1f, 0xb0, 0x8f, 0x15, 0x4f, 0x15, 0x42, 0x16, 0xeb, 0xfe, 0x0b, - 0x48, 0x48, 0x79, 0x42, 0xf7, 0xc7, 0xdc, 0x19, 0x8f, 0xed, 0xdd, 0xa8, 0x6b, 0x77, 0x79, 0xcb, - 0x9c, 0x73, 0xee, 0xf9, 0x9c, 0x7b, 0xee, 0x39, 0xe7, 0x9e, 0x7b, 0x1c, 0xd0, 0x38, 0x7c, 0x81, - 0x96, 0x4d, 0x77, 0xed, 0x30, 0xdc, 0x27, 0xbe, 0x43, 0x02, 0x42, 0xd7, 0x5a, 0xc4, 0xa9, 0xbb, - 0xfe, 0x9a, 0x64, 0x60, 0xcf, 0x24, 0xc7, 0x01, 0x71, 0xa8, 0xe9, 0x3a, 0xf4, 0x0a, 0xf6, 0x4c, - 0x4a, 0xfc, 0x16, 0xf1, 0xd7, 0xbc, 0xc3, 0x26, 0xe3, 0xd1, 0x6e, 0x81, 0xb5, 0xd6, 0x33, 0xfb, - 0x24, 0xc0, 0xcf, 0xac, 0x35, 0x89, 0x43, 0x7c, 0x1c, 0x90, 0x7a, 0xd9, 0xf3, 0xdd, 0xc0, 0x85, - 0x5f, 0x15, 0xea, 0xca, 0x5d, 0xd2, 0xef, 0x28, 0x75, 0x65, 0xef, 0xb0, 0xc9, 0x78, 0xb4, 0x5b, - 0xa0, 0x2c, 0xd5, 0x2d, 0x5e, 0x69, 0x9a, 0xc1, 0x41, 0xb8, 0x5f, 0x36, 0x5c, 0x7b, 0xad, 0xe9, - 0x36, 0xdd, 0x35, 0xae, 0x75, 0x3f, 0x6c, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0xcf, - 0xc6, 0xc6, 0xdb, 0xd8, 0x38, 0x30, 0x1d, 0xe2, 0x9f, 0xc4, 0x16, 0xdb, 0x24, 0xc0, 0x6b, 0xad, - 0x1e, 0x1b, 0x17, 0xd7, 0x06, 0xad, 0xf2, 0x43, 0x27, 0x30, 0x6d, 0xd2, 0xb3, 0xe0, 0xcb, 0xf7, - 0x5b, 0x40, 0x8d, 0x03, 0x62, 0xe3, 0xf4, 0xba, 0xd2, 0xa9, 0x06, 0xe6, 0xab, 0xae, 0xd3, 0x22, - 0x3e, 0xdb, 0x25, 0x22, 0xb7, 0x42, 0x42, 0x03, 0x58, 0x01, 0xd9, 0xd0, 0xac, 0xeb, 0xda, 0xb2, - 0xb6, 0x3a, 0x59, 0x79, 0xfa, 0x4e, 0xbb, 0x78, 0xae, 0xd3, 0x2e, 0x66, 0x6f, 0x6c, 0x6d, 0x9c, - 0xb6, 0x8b, 0x2b, 0x83, 0x90, 0x82, 0x13, 0x8f, 0xd0, 0xf2, 0x8d, 0xad, 0x0d, 0xc4, 0x16, 0xc3, - 0x97, 0xc1, 0x7c, 0x9d, 0x50, 0xd3, 0x27, 0xf5, 0xf5, 0xbd, 0xad, 0x57, 0x85, 0x7e, 0x3d, 0xc3, - 0x35, 0x5e, 0x92, 0x1a, 0xe7, 0x37, 0xd2, 0x02, 0xa8, 0x77, 0x0d, 0x7c, 0x1d, 0x4c, 0xb8, 0xfb, - 0x37, 0x89, 0x11, 0x50, 0x3d, 0xbb, 0x9c, 0x5d, 0x9d, 0xba, 0x7a, 0xa5, 0x1c, 0x9f, 0xa0, 0x32, - 0x81, 0x1f, 0x9b, 0xdc, 0x6c, 0x19, 0xe1, 0xa3, 0xcd, 0xe8, 0xe4, 0x2a, 0xb3, 0x12, 0x6d, 0x62, - 0x57, 0x68, 0x41, 0x91, 0xba, 0xd2, 0xef, 0x32, 0x00, 0x26, 0x37, 0x4f, 0x3d, 0xd7, 0xa1, 0x64, - 0x28, 0xbb, 0xa7, 0x60, 0xce, 0xe0, 0x9a, 0x03, 0x52, 0x97, 0xb8, 0x7a, 0xe6, 0xb3, 0x58, 0xaf, - 0x4b, 0xfc, 0xb9, 0x6a, 0x4a, 0x1d, 0xea, 0x01, 0x80, 0xd7, 0xc1, 0xb8, 0x4f, 0x68, 0x68, 0x05, - 0x7a, 0x76, 
0x59, 0x5b, 0x9d, 0xba, 0x7a, 0x79, 0x20, 0x14, 0x8f, 0x6f, 0x16, 0x7c, 0xe5, 0xd6, - 0x33, 0xe5, 0x5a, 0x80, 0x83, 0x90, 0x56, 0xce, 0x4b, 0xa4, 0x71, 0xc4, 0x75, 0x20, 0xa9, 0xab, - 0xf4, 0xc3, 0x0c, 0x98, 0x4b, 0x7a, 0xa9, 0x65, 0x92, 0x23, 0x78, 0x04, 0x26, 0x7c, 0x11, 0x2c, - 0xdc, 0x4f, 0x53, 0x57, 0xf7, 0xca, 0x0f, 0x94, 0x56, 0xe5, 0x9e, 0x20, 0xac, 0x4c, 0xb1, 0x33, - 0x93, 0x1f, 0x28, 0x42, 0x83, 0xef, 0x81, 0xbc, 0x2f, 0x0f, 0x8a, 0x47, 0xd3, 0xd4, 0xd5, 0xaf, - 0x0f, 0x11, 0x59, 0x28, 0xae, 0x4c, 0x77, 0xda, 0xc5, 0x7c, 0xf4, 0x85, 0x14, 0x60, 0xe9, 0xa3, - 0x0c, 0x28, 0x54, 0x43, 0x1a, 0xb8, 0x36, 0x22, 0xd4, 0x0d, 0x7d, 0x83, 0x54, 0x5d, 0x2b, 0xb4, - 0x9d, 0x0d, 0xd2, 0x30, 0x1d, 0x33, 0x60, 0xd1, 0xba, 0x0c, 0xc6, 0x1c, 0x6c, 0x13, 0x19, 0x3d, - 0xd3, 0xd2, 0xa7, 0x63, 0x3b, 0xd8, 0x26, 0x88, 0x73, 0x98, 0x04, 0x0b, 0x16, 0x99, 0x0b, 0x4a, - 0xe2, 0xfa, 0x89, 0x47, 0x10, 0xe7, 0xc0, 0x27, 0xc1, 0x78, 0xc3, 0xf5, 0x6d, 0x2c, 0xce, 0x71, - 0x32, 0x3e, 0x99, 0x97, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0e, 0x4c, 0xd5, 0x09, 0x35, 0x7c, 0xd3, - 0x63, 0xd0, 0xfa, 0x18, 0x17, 0xbe, 0x20, 0x85, 0xa7, 0x36, 0x62, 0x16, 0x4a, 0xca, 0xc1, 0xcb, - 0x20, 0xef, 0xf9, 0xa6, 0xeb, 0x9b, 0xc1, 0x89, 0x9e, 0x5b, 0xd6, 0x56, 0x73, 0x95, 0x39, 0xb9, - 0x26, 0xbf, 0x27, 0xe9, 0x48, 0x49, 0xc0, 0x65, 0x90, 0x7f, 0xa5, 0xb6, 0xbb, 0xb3, 0x87, 0x83, - 0x03, 0x7d, 0x9c, 0x23, 0x8c, 0x31, 0x69, 0x94, 0xbf, 0x29, 0xa9, 0xa5, 0x7f, 0x66, 0x80, 0x9e, - 0xf6, 0x4a, 0xe4, 0x52, 0xf8, 0x12, 0xc8, 0xd3, 0x80, 0x55, 0x9c, 0xe6, 0x89, 0xf4, 0xc9, 0x53, - 0x11, 0x58, 0x4d, 0xd2, 0x4f, 0xdb, 0xc5, 0x8b, 0xf1, 0x8a, 0x88, 0xca, 0xfd, 0xa1, 0xd6, 0xc2, - 0x5f, 0x6b, 0xe0, 0xc2, 0x11, 0xd9, 0x3f, 0x70, 0xdd, 0xc3, 0xaa, 0x65, 0x12, 0x27, 0xa8, 0xba, - 0x4e, 0xc3, 0x6c, 0xca, 0x18, 0x40, 0x0f, 0x18, 0x03, 0xaf, 0xf5, 0x6a, 0xae, 0x3c, 0xda, 0x69, - 0x17, 0x2f, 0xf4, 0x61, 0xa0, 0x7e, 0x76, 0xc0, 0xd7, 0x81, 0x6e, 0xa4, 0x92, 0x44, 0x16, 0x30, - 0x51, 0xb6, 0x26, 0x2b, 0x4b, 0x9d, 0x76, 0x51, 0xaf, 0x0e, 0x90, 0x41, 0x03, 0x57, 0x97, 0xde, - 0xcf, 0xa6, 0xdd, 0x9b, 0x08, 0xb7, 0x77, 0x41, 0x9e, 0xa5, 0x71, 0x1d, 0x07, 0x58, 0x26, 0xe2, - 0xd3, 0x67, 0x4b, 0x7a, 0x51, 0x33, 0xb6, 0x49, 0x80, 0x2b, 0x50, 0x1e, 0x08, 0x88, 0x69, 0x48, - 0x69, 0x85, 0xdf, 0x06, 0x63, 0xd4, 0x23, 0x86, 0x74, 0xf4, 0x1b, 0x0f, 0x9a, 0x6c, 0x03, 0x36, - 0x52, 0xf3, 0x88, 0x11, 0xe7, 0x02, 0xfb, 0x42, 0x1c, 0x16, 0x7e, 0xa0, 0x81, 0x71, 0xca, 0x0b, - 0x94, 0x2c, 0x6a, 0x6f, 0x8d, 0xca, 0x82, 0x54, 0x15, 0x14, 0xdf, 0x48, 0x82, 0x97, 0xfe, 0x9d, - 0x01, 0x2b, 0x83, 0x96, 0x56, 0x5d, 0xa7, 0x2e, 0x8e, 0x63, 0x4b, 0xe6, 0xb6, 0x88, 0xf4, 0xe7, - 0x92, 0xb9, 0x7d, 0xda, 0x2e, 0x3e, 0x71, 0x5f, 0x05, 0x89, 0x22, 0xf0, 0x15, 0xb5, 0x6f, 0x51, - 0x28, 0x56, 0xba, 0x0d, 0x3b, 0x6d, 0x17, 0x67, 0xd5, 0xb2, 0x6e, 0x5b, 0x61, 0x0b, 0x40, 0x0b, - 0xd3, 0xe0, 0xba, 0x8f, 0x1d, 0x2a, 0xd4, 0x9a, 0x36, 0x91, 0xee, 0x7b, 0xea, 0x6c, 0xe1, 0xc1, - 0x56, 0x54, 0x16, 0x25, 0x24, 0xbc, 0xd6, 0xa3, 0x0d, 0xf5, 0x41, 0x60, 0x75, 0xcb, 0x27, 0x98, - 0xaa, 0x52, 0x94, 0xb8, 0x51, 0x18, 0x15, 0x49, 0x2e, 0xfc, 0x02, 0x98, 0xb0, 0x09, 0xa5, 0xb8, - 0x49, 0x78, 0xfd, 0x99, 0x8c, 0xaf, 0xe8, 0x6d, 0x41, 0x46, 0x11, 0x9f, 0xf5, 0x27, 0x4b, 0x83, - 0xbc, 0x76, 0xcd, 0xa4, 0x01, 0x7c, 0xb3, 0x27, 0x01, 0xca, 0x67, 0xdb, 0x21, 0x5b, 0xcd, 0xc3, - 0x5f, 0x15, 0xbf, 0x88, 0x92, 0x08, 0xfe, 0x6f, 0x81, 0x9c, 0x19, 0x10, 0x3b, 0xba, 0xbb, 0x5f, - 0x1b, 0x51, 0xec, 0x55, 0x66, 0xa4, 0x0d, 0xb9, 0x2d, 0x86, 0x86, 0x04, 0x68, 0xe9, 0xf7, 0x19, - 0xf0, 0xd8, 0xa0, 0x25, 0xec, 0x42, 
0xa1, 0xcc, 0xe3, 0x9e, 0x15, 0xfa, 0xd8, 0x92, 0x11, 0xa7, - 0x3c, 0xbe, 0xc7, 0xa9, 0x48, 0x72, 0x59, 0xc9, 0xa7, 0xa6, 0xd3, 0x0c, 0x2d, 0xec, 0xcb, 0x70, - 0x52, 0xbb, 0xae, 0x49, 0x3a, 0x52, 0x12, 0xb0, 0x0c, 0x00, 0x3d, 0x70, 0xfd, 0x80, 0x63, 0xc8, - 0xea, 0x75, 0x9e, 0x15, 0x88, 0x9a, 0xa2, 0xa2, 0x84, 0x04, 0xbb, 0xd1, 0x0e, 0x4d, 0xa7, 0x2e, - 0x4f, 0x5d, 0x65, 0xf1, 0xd7, 0x4c, 0xa7, 0x8e, 0x38, 0x87, 0xe1, 0x5b, 0x26, 0x0d, 0x18, 0x45, - 0x1e, 0x79, 0x97, 0xd7, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x06, 0xab, 0xfa, 0xae, 0x6f, 0x12, 0xaa, - 0x8f, 0xc7, 0xf8, 0x55, 0x45, 0x45, 0x09, 0x89, 0xd2, 0x2f, 0xf3, 0x83, 0x83, 0x84, 0x95, 0x12, - 0xf8, 0x38, 0xc8, 0x35, 0x7d, 0x37, 0xf4, 0xa4, 0x97, 0x94, 0xb7, 0x5f, 0x66, 0x44, 0x24, 0x78, - 0x2c, 0x2a, 0x5b, 0x5d, 0x6d, 0xaa, 0x8a, 0xca, 0xa8, 0x39, 0x8d, 0xf8, 0xf0, 0x7b, 0x1a, 0xc8, - 0x39, 0xd2, 0x39, 0x2c, 0xe4, 0xde, 0x1c, 0x51, 0x5c, 0x70, 0xf7, 0xc6, 0xe6, 0x0a, 0xcf, 0x0b, - 0x64, 0xf8, 0x2c, 0xc8, 0x51, 0xc3, 0xf5, 0x88, 0xf4, 0x7a, 0x21, 0x12, 0xaa, 0x31, 0xe2, 0x69, - 0xbb, 0x38, 0x13, 0xa9, 0xe3, 0x04, 0x24, 0x84, 0xe1, 0x0f, 0x34, 0x00, 0x5a, 0xd8, 0x32, 0xeb, - 0x98, 0xb7, 0x0c, 0x39, 0x6e, 0xfe, 0x70, 0xc3, 0xfa, 0x55, 0xa5, 0x5e, 0x1c, 0x5a, 0xfc, 0x8d, - 0x12, 0xd0, 0xf0, 0x43, 0x0d, 0x4c, 0xd3, 0x70, 0xdf, 0x97, 0xab, 0x28, 0x6f, 0x2e, 0xa6, 0xae, - 0x7e, 0x63, 0xa8, 0xb6, 0xd4, 0x12, 0x00, 0x95, 0xb9, 0x4e, 0xbb, 0x38, 0x9d, 0xa4, 0xa0, 0x2e, - 0x03, 0xe0, 0x8f, 0x35, 0x90, 0x6f, 0x45, 0x77, 0xf6, 0x04, 0x4f, 0xf8, 0xb7, 0x47, 0x74, 0xb0, - 0x32, 0xa2, 0xe2, 0x2c, 0x50, 0x7d, 0x80, 0xb2, 0x00, 0xfe, 0x55, 0x03, 0x3a, 0xae, 0x8b, 0x02, - 0x8f, 0xad, 0x3d, 0xdf, 0x74, 0x02, 0xe2, 0x8b, 0x7e, 0x93, 0xea, 0x79, 0x6e, 0xde, 0x70, 0xef, - 0xc2, 0x74, 0x2f, 0x5b, 0x59, 0x96, 0xd6, 0xe9, 0xeb, 0x03, 0xcc, 0x40, 0x03, 0x0d, 0xe4, 0x81, - 0x16, 0xb7, 0x34, 0xfa, 0xe4, 0x08, 0x02, 0x2d, 0xee, 0xa5, 0x64, 0x75, 0x88, 0x3b, 0xa8, 0x04, - 0x74, 0xe9, 0xc3, 0x6c, 0xba, 0x69, 0x4f, 0x5f, 0xfa, 0xf0, 0xb6, 0x30, 0x56, 0x6c, 0x85, 0xea, - 0x1a, 0x77, 0xee, 0xbb, 0x23, 0x3a, 0x7b, 0x75, 0x6b, 0xc7, 0x8d, 0x97, 0x22, 0x51, 0x94, 0xb0, - 0x03, 0xfe, 0x42, 0x03, 0x33, 0xd8, 0x30, 0x88, 0x17, 0x90, 0xba, 0xa8, 0xc5, 0x99, 0xcf, 0xa1, - 0xdc, 0x2c, 0x48, 0xab, 0x66, 0xd6, 0x93, 0xd0, 0xa8, 0xdb, 0x12, 0xf8, 0x22, 0x38, 0x4f, 0x03, - 0xd7, 0x27, 0xf5, 0x54, 0x97, 0x0b, 0x3b, 0xed, 0xe2, 0xf9, 0x5a, 0x17, 0x07, 0xa5, 0x24, 0x4b, - 0x9f, 0x8e, 0x81, 0xe2, 0x7d, 0x32, 0xe3, 0x0c, 0xef, 0xa8, 0x27, 0xc1, 0x38, 0xdf, 0x6e, 0x9d, - 0x7b, 0x25, 0x9f, 0xe8, 0xdc, 0x38, 0x15, 0x49, 0x2e, 0xab, 0xeb, 0x0c, 0x9f, 0x75, 0x1b, 0x59, - 0x2e, 0xa8, 0xea, 0x7a, 0x4d, 0x90, 0x51, 0xc4, 0x87, 0xef, 0x81, 0x71, 0x31, 0x27, 0xe1, 0x45, - 0x75, 0x84, 0x85, 0x11, 0x70, 0x3b, 0x39, 0x14, 0x92, 0x90, 0xbd, 0x05, 0x31, 0xf7, 0xb0, 0x0b, - 0xe2, 0x3d, 0x2b, 0xd0, 0xf8, 0xff, 0x78, 0x05, 0x2a, 0xfd, 0x47, 0x4b, 0xe7, 0x7d, 0x62, 0xab, - 0x35, 0x03, 0x5b, 0x04, 0x6e, 0x80, 0x39, 0xf6, 0xc8, 0x40, 0xc4, 0xb3, 0x4c, 0x03, 0x53, 0xfe, - 0xc6, 0x15, 0x01, 0xa7, 0xc6, 0x2e, 0xb5, 0x14, 0x1f, 0xf5, 0xac, 0x80, 0xaf, 0x00, 0x28, 0x1a, - 0xef, 0x2e, 0x3d, 0xa2, 0x87, 0x50, 0x2d, 0x74, 0xad, 0x47, 0x02, 0xf5, 0x59, 0x05, 0xab, 0x60, - 0xde, 0xc2, 0xfb, 0xc4, 0xaa, 0x11, 0x8b, 0x18, 0x81, 0xeb, 0x73, 0x55, 0x62, 0x0a, 0xb0, 0xd0, - 0x69, 0x17, 0xe7, 0xaf, 0xa5, 0x99, 0xa8, 0x57, 0xbe, 0xb4, 0x92, 0x4e, 0xaf, 0xe4, 0xc6, 0xc5, - 0x73, 0xe6, 0x37, 0x19, 0xb0, 0x38, 0x38, 0x32, 0xe0, 0xf7, 0xe3, 0x57, 0x97, 0x68, 0xaa, 0xdf, - 0x1e, 0x55, 0x14, 0xca, 0x67, 0x17, 0xe8, 0x7d, 0x72, 0xc1, 
0xef, 0xb0, 0x0e, 0x07, 0x5b, 0xd1, - 0x9c, 0xe7, 0xad, 0x91, 0x99, 0xc0, 0x40, 0x2a, 0x93, 0xa2, 0x79, 0xc2, 0x16, 0xef, 0x95, 0xb0, - 0x45, 0x4a, 0x7f, 0xd0, 0xd2, 0x0f, 0xef, 0x38, 0x83, 0xe1, 0x4f, 0x34, 0x30, 0xeb, 0x7a, 0xc4, - 0x59, 0xdf, 0xdb, 0x7a, 0xf5, 0x4b, 0x22, 0x93, 0xa5, 0xab, 0x76, 0x1e, 0xd0, 0xce, 0x57, 0x6a, - 0xbb, 0x3b, 0x42, 0xe1, 0x9e, 0xef, 0x7a, 0xb4, 0x72, 0xa1, 0xd3, 0x2e, 0xce, 0xee, 0x76, 0x43, - 0xa1, 0x34, 0x76, 0xc9, 0x06, 0x0b, 0x9b, 0xc7, 0x01, 0xf1, 0x1d, 0x6c, 0x6d, 0xb8, 0x46, 0x68, - 0x13, 0x27, 0x10, 0x86, 0xa6, 0x86, 0x44, 0xda, 0x19, 0x87, 0x44, 0x8f, 0x81, 0x6c, 0xe8, 0x5b, - 0x32, 0x8a, 0xa7, 0xd4, 0x10, 0x14, 0x5d, 0x43, 0x8c, 0x5e, 0x5a, 0x01, 0x63, 0xcc, 0x4e, 0x78, - 0x09, 0x64, 0x7d, 0x7c, 0xc4, 0xb5, 0x4e, 0x57, 0x26, 0x98, 0x08, 0xc2, 0x47, 0x88, 0xd1, 0x4a, - 0xff, 0x58, 0x02, 0xb3, 0xa9, 0xbd, 0xc0, 0x45, 0x90, 0x51, 0x93, 0x55, 0x20, 0x95, 0x66, 0xb6, - 0x36, 0x50, 0xc6, 0xac, 0xc3, 0xe7, 0x55, 0xf1, 0x15, 0xa0, 0x45, 0x55, 0xcf, 0x39, 0x95, 0xb5, - 0xb4, 0xb1, 0x3a, 0x66, 0x48, 0x54, 0x38, 0x99, 0x0d, 0xa4, 0x21, 0xb3, 0x44, 0xd8, 0x40, 0x1a, - 0x88, 0xd1, 0x3e, 0xeb, 0x84, 0x2c, 0x1a, 0xd1, 0xe5, 0xce, 0x30, 0xa2, 0x1b, 0xbf, 0xe7, 0x88, - 0xee, 0x71, 0x90, 0x0b, 0xcc, 0xc0, 0x22, 0xfa, 0x44, 0xf7, 0xcb, 0xe3, 0x3a, 0x23, 0x22, 0xc1, - 0x83, 0x37, 0xc1, 0x44, 0x9d, 0x34, 0x70, 0x68, 0x05, 0x7a, 0x9e, 0x87, 0x50, 0x75, 0x08, 0x21, - 0x24, 0xe6, 0xa7, 0x1b, 0x42, 0x2f, 0x8a, 0x00, 0xe0, 0x13, 0x60, 0xc2, 0xc6, 0xc7, 0xa6, 0x1d, - 0xda, 0xbc, 0x27, 0xd3, 0x84, 0xd8, 0xb6, 0x20, 0xa1, 0x88, 0xc7, 0x2a, 0x23, 0x39, 0x36, 0xac, - 0x90, 0x9a, 0x2d, 0x22, 0x99, 0x3a, 0xe0, 0xb7, 0xa7, 0xaa, 0x8c, 0x9b, 0x29, 0x3e, 0xea, 0x59, - 0xc1, 0xc1, 0x4c, 0x87, 0x2f, 0x9e, 0x4a, 0x80, 0x09, 0x12, 0x8a, 0x78, 0xdd, 0x60, 0x52, 0x7e, - 0x7a, 0x10, 0x98, 0x5c, 0xdc, 0xb3, 0x02, 0x7e, 0x11, 0x4c, 0xda, 0xf8, 0xf8, 0x1a, 0x71, 0x9a, - 0xc1, 0x81, 0x3e, 0xb3, 0xac, 0xad, 0x66, 0x2b, 0x33, 0x9d, 0x76, 0x71, 0x72, 0x3b, 0x22, 0xa2, - 0x98, 0xcf, 0x85, 0x4d, 0x47, 0x0a, 0x9f, 0x4f, 0x08, 0x47, 0x44, 0x14, 0xf3, 0x59, 0x07, 0xe1, - 0xe1, 0x80, 0x25, 0x97, 0x3e, 0xdb, 0xfd, 0x32, 0xdc, 0x13, 0x64, 0x14, 0xf1, 0xe1, 0x2a, 0xc8, - 0xdb, 0xf8, 0x98, 0xbf, 0xe2, 0xf5, 0x39, 0xae, 0x96, 0xcf, 0x92, 0xb7, 0x25, 0x0d, 0x29, 0x2e, - 0x97, 0x34, 0x1d, 0x21, 0x39, 0x9f, 0x90, 0x94, 0x34, 0xa4, 0xb8, 0x2c, 0x88, 0x43, 0xc7, 0xbc, - 0x15, 0x12, 0x21, 0x0c, 0xb9, 0x67, 0x54, 0x10, 0xdf, 0x88, 0x59, 0x28, 0x29, 0xc7, 0x5e, 0xd1, - 0x76, 0x68, 0x05, 0xa6, 0x67, 0x91, 0xdd, 0x86, 0x7e, 0x81, 0xfb, 0x9f, 0xf7, 0xc9, 0xdb, 0x8a, - 0x8a, 0x12, 0x12, 0x90, 0x80, 0x31, 0xe2, 0x84, 0xb6, 0xfe, 0x08, 0xbf, 0xd8, 0x87, 0x12, 0x82, - 0x2a, 0x73, 0x36, 0x9d, 0xd0, 0x46, 0x5c, 0x3d, 0x7c, 0x1e, 0xcc, 0xd8, 0xf8, 0x98, 0x95, 0x03, - 0xe2, 0x07, 0xec, 0x7d, 0xbf, 0xc0, 0x37, 0x3f, 0xcf, 0x3a, 0xce, 0xed, 0x24, 0x03, 0x75, 0xcb, - 0xf1, 0x85, 0xa6, 0x93, 0x58, 0x78, 0x31, 0xb1, 0x30, 0xc9, 0x40, 0xdd, 0x72, 0xcc, 0xd3, 0x3e, - 0xb9, 0x15, 0x9a, 0x3e, 0xa9, 0xeb, 0x8f, 0xf2, 0x26, 0x55, 0xce, 0xf7, 0x05, 0x0d, 0x29, 0x2e, - 0x6c, 0x45, 0xe3, 0x1e, 0x9d, 0xa7, 0xe1, 0x8d, 0xe1, 0x56, 0xf2, 0x5d, 0x7f, 0xdd, 0xf7, 0xf1, - 0x89, 0xb8, 0x69, 0x92, 0x83, 0x1e, 0x48, 0x41, 0x0e, 0x5b, 0xd6, 0x6e, 0x43, 0xbf, 0xc4, 0x7d, - 0x3f, 0xec, 0x1b, 0x44, 0x55, 0x9d, 0x75, 0x06, 0x82, 0x04, 0x16, 0x03, 0x75, 0x1d, 0x16, 0x1a, - 0x8b, 0xa3, 0x05, 0xdd, 0x65, 0x20, 0x48, 0x60, 0xf1, 0x9d, 0x3a, 0x27, 0xbb, 0x0d, 0xfd, 0xff, - 0x46, 0xbc, 0x53, 0x06, 0x82, 0x04, 0x16, 0x34, 0x41, 0xd6, 0x71, 0x03, 0x7d, 0x69, 
0x24, 0xd7, - 0x33, 0xbf, 0x70, 0x76, 0xdc, 0x00, 0x31, 0x0c, 0xf8, 0x73, 0x0d, 0x00, 0x2f, 0x0e, 0xd1, 0xc7, - 0x86, 0x32, 0x45, 0x48, 0x41, 0x96, 0xe3, 0xd8, 0xde, 0x74, 0x02, 0xff, 0x24, 0x7e, 0x47, 0x26, - 0x72, 0x20, 0x61, 0x05, 0xfc, 0xad, 0x06, 0x1e, 0x49, 0xb6, 0xc9, 0xca, 0xbc, 0x02, 0xf7, 0xc8, - 0xf5, 0x61, 0x87, 0x79, 0xc5, 0x75, 0xad, 0x8a, 0xde, 0x69, 0x17, 0x1f, 0x59, 0xef, 0x83, 0x8a, - 0xfa, 0xda, 0x02, 0xff, 0xa8, 0x81, 0x79, 0x59, 0x45, 0x13, 0x16, 0x16, 0xb9, 0x03, 0xc9, 0xb0, - 0x1d, 0x98, 0xc6, 0x11, 0x7e, 0x54, 0xbf, 0x4b, 0xf7, 0xf0, 0x51, 0xaf, 0x69, 0xf0, 0x2f, 0x1a, - 0x98, 0xae, 0x13, 0x8f, 0x38, 0x75, 0xe2, 0x18, 0xcc, 0xd6, 0xe5, 0xa1, 0x8c, 0x0d, 0xd2, 0xb6, - 0x6e, 0x24, 0x20, 0x84, 0x99, 0x65, 0x69, 0xe6, 0x74, 0x92, 0x75, 0xda, 0x2e, 0x5e, 0x8c, 0x97, - 0x26, 0x39, 0xa8, 0xcb, 0x4a, 0xf8, 0x91, 0x06, 0x66, 0xe3, 0x03, 0x10, 0x57, 0xca, 0xca, 0x08, - 0xe3, 0x80, 0xb7, 0xaf, 0xeb, 0xdd, 0x80, 0x28, 0x6d, 0x01, 0xfc, 0x93, 0xc6, 0x3a, 0xb5, 0xe8, - 0xdd, 0x47, 0xf5, 0x12, 0xf7, 0xe5, 0x3b, 0x43, 0xf7, 0xa5, 0x42, 0x10, 0xae, 0xbc, 0x1c, 0xb7, - 0x82, 0x8a, 0x73, 0xda, 0x2e, 0x2e, 0x24, 0x3d, 0xa9, 0x18, 0x28, 0x69, 0x21, 0xfc, 0x91, 0x06, - 0xa6, 0x49, 0xdc, 0x71, 0x53, 0xfd, 0xf1, 0xa1, 0x38, 0xb1, 0x6f, 0x13, 0x2f, 0x5e, 0xea, 0x09, - 0x16, 0x45, 0x5d, 0xd8, 0xac, 0x83, 0x24, 0xc7, 0xd8, 0xf6, 0x2c, 0xa2, 0xff, 0xff, 0x90, 0x3b, - 0xc8, 0x4d, 0xa1, 0x17, 0x45, 0x00, 0xf0, 0x32, 0xc8, 0x3b, 0xa1, 0x65, 0xe1, 0x7d, 0x8b, 0xe8, - 0x4f, 0xf0, 0x5e, 0x44, 0x4d, 0x31, 0x77, 0x24, 0x1d, 0x29, 0x89, 0x45, 0xf6, 0x4e, 0x4a, 0xe5, - 0x19, 0x9c, 0x03, 0xd9, 0x43, 0x22, 0x7f, 0x0e, 0x46, 0xec, 0x4f, 0x58, 0x07, 0xb9, 0x16, 0xb6, - 0xc2, 0xe8, 0xa9, 0x37, 0xe4, 0x1a, 0x8d, 0x84, 0xf2, 0x17, 0x33, 0x2f, 0x68, 0x8b, 0xb7, 0x35, - 0x70, 0xb1, 0x7f, 0xfa, 0x3f, 0x54, 0xb3, 0x7e, 0xa5, 0x81, 0xf9, 0x9e, 0x4c, 0xef, 0x63, 0xd1, - 0xad, 0x6e, 0x8b, 0xde, 0x18, 0x76, 0xca, 0xd6, 0x02, 0xdf, 0x74, 0x9a, 0xbc, 0x4f, 0x49, 0x9a, - 0xf7, 0x53, 0x0d, 0xcc, 0xa5, 0x93, 0xe7, 0x61, 0xfa, 0xab, 0x74, 0x3b, 0x03, 0x2e, 0xf6, 0x6f, - 0xaf, 0xa0, 0xaf, 0xde, 0x91, 0xa3, 0x79, 0x8f, 0xf7, 0x9b, 0xdd, 0x7d, 0xa0, 0x81, 0xa9, 0x9b, - 0x4a, 0x2e, 0xfa, 0xb9, 0x70, 0xe8, 0x93, 0x80, 0xa8, 0x5a, 0xc5, 0x0c, 0x8a, 0x92, 0xb8, 0xa5, - 0x3f, 0x6b, 0x60, 0xa1, 0x6f, 0x19, 0x66, 0x0f, 0x56, 0x6c, 0x59, 0xee, 0x91, 0x18, 0xe8, 0x24, - 0xa6, 0xa5, 0xeb, 0x9c, 0x8a, 0x24, 0x37, 0xe1, 0xbd, 0xcc, 0xe7, 0xe5, 0xbd, 0xd2, 0xdf, 0x34, - 0xb0, 0x74, 0xaf, 0x48, 0x7c, 0x28, 0x47, 0xba, 0x0a, 0xf2, 0xb2, 0x85, 0x3a, 0xe1, 0xc7, 0x29, - 0x5f, 0x0d, 0xb2, 0x68, 0xf0, 0xff, 0x90, 0x11, 0x7f, 0x95, 0xde, 0xd7, 0xc0, 0x5c, 0x8d, 0xf8, - 0x2d, 0xd3, 0x20, 0x88, 0x34, 0x88, 0x4f, 0x1c, 0x83, 0xc0, 0x35, 0x30, 0xc9, 0x7f, 0xa7, 0xf3, - 0xb0, 0x11, 0x0d, 0xb1, 0xe7, 0xa5, 0xcb, 0x27, 0x77, 0x22, 0x06, 0x8a, 0x65, 0xd4, 0xc0, 0x3b, - 0x33, 0x70, 0xe0, 0xbd, 0x04, 0xc6, 0xbc, 0x78, 0x1c, 0x98, 0x67, 0x5c, 0x3e, 0x01, 0xe4, 0xd4, - 0xd2, 0xdf, 0x35, 0xd0, 0xef, 0xbf, 0x55, 0x60, 0x0b, 0x4c, 0x50, 0x61, 0x9c, 0x74, 0xde, 0xee, - 0x03, 0x3a, 0x2f, 0xbd, 0x55, 0x71, 0x4d, 0x44, 0xd4, 0x08, 0x8c, 0xf9, 0xcf, 0xc0, 0x95, 0xd0, - 0xa9, 0xcb, 0x01, 0xde, 0xb4, 0xf0, 0x5f, 0x75, 0x5d, 0xd0, 0x90, 0xe2, 0xc2, 0x4b, 0x62, 0xd4, - 0x94, 0x98, 0xdf, 0x44, 0x63, 0xa6, 0xca, 0x95, 0x3b, 0x77, 0x0b, 0xe7, 0x3e, 0xbe, 0x5b, 0x38, - 0xf7, 0xc9, 0xdd, 0xc2, 0xb9, 0xef, 0x76, 0x0a, 0xda, 0x9d, 0x4e, 0x41, 0xfb, 0xb8, 0x53, 0xd0, - 0x3e, 0xe9, 0x14, 0xb4, 0x7f, 0x75, 0x0a, 0xda, 0xcf, 0x3e, 0x2d, 0x9c, 0xfb, 0xe6, 0x84, 0x34, - 0xed, 0xbf, 
0x01, 0x00, 0x00, 0xff, 0xff, 0x16, 0xda, 0x75, 0x14, 0x43, 0x2a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 7e18cb9e..12ecc7ab 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -30,32 +30,33 @@ option go_package = "v1beta1"; // ConversionRequest describes the conversion request parameters. message ConversionRequest { - // `uid` is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. optional string uid = 1; - // `desiredAPIVersion` is the version to convert given objects to. e.g. "myapi.example.com/v1" + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" optional string desiredAPIVersion = 2; - // `objects` is the list of CR objects to be converted. + // objects is the list of custom resource objects to be converted. repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } // ConversionResponse describes a conversion response. message ConversionResponse { - // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. optional string uid = 1; - // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. - // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list - // must also has the same size as input list with the same objects in the same order(i.e. equal UIDs and object meta) + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; - // `result` contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represent the success or failure of the + // result contains the result of conversion with extra details if the conversion failed. 
`result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. @@ -64,11 +65,11 @@ message ConversionResponse { // ConversionReview describes a conversion request/response. message ConversionReview { - // `request` describes the attributes for the conversion request. + // request describes the attributes for the conversion request. // +optional optional ConversionRequest request = 1; - // `response` describes the attributes for the conversion response. + // response describes the attributes for the conversion response. // +optional optional ConversionResponse response = 2; } @@ -79,12 +80,12 @@ message CustomResourceColumnDefinition { optional string name = 1; // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. optional string type = 2; // format is an optional OpenAPI type definition for this column. The 'name' format is applied // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. // +optional optional string format = 3; @@ -94,68 +95,71 @@ message CustomResourceColumnDefinition { // priority is an integer defining the relative importance of this column compared to others. Lower // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. + // should be given a priority greater than 0. // +optional optional int32 priority = 5; - // JSONPath is a simple JSON path, i.e. with array notation. + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. optional string JSONPath = 6; } // CustomResourceConversion describes how to convert different versions of a CR. message CustomResourceConversion { - // `strategy` specifies the conversion strategy. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. optional string strategy = 1; - // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. 
This field is - // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. // +optional optional WebhookClientConfig webhookClientConfig = 2; - // ConversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. API server will try to use first version in + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in // the list which it supports. If none of the versions specified in this list - // supported by API server, conversion will fail for this object. + // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. - // Default to `['v1beta1']`. + // Defaults to `["v1beta1"]`. // +optional repeated string conversionReviewVersions = 3; } // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.19. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. message CustomResourceDefinition { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec describes how the user wants the resources to appear + // spec describes how the user wants the resources to appear optional CustomResourceDefinitionSpec spec = 2; - // Status indicates the actual state of the CustomResourceDefinition + // status indicates the actual state of the CustomResourceDefinition // +optional optional CustomResourceDefinitionStatus status = 3; } // CustomResourceDefinitionCondition contains details for the current condition of this pod. message CustomResourceDefinitionCondition { - // Type is the type of the condition. + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. optional string type = 1; - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. optional string status = 2; - // Last time the condition transitioned from one status to another. + // lastTransitionTime last time the condition transitioned from one status to another. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Unique, one-word, CamelCase reason for the condition's last transition. + // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional optional string reason = 4; - // Human-readable message indicating details about last transition. + // message is a human-readable message indicating details about last transition. // +optional optional string message = 5; } @@ -164,71 +168,81 @@ message CustomResourceDefinitionCondition { message CustomResourceDefinitionList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items individual CustomResourceDefinitions + // items list individual CustomResourceDefinition objects repeated CustomResourceDefinition items = 2; } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition message CustomResourceDefinitionNames { - // Plural is the plural name of the resource to serve. 
It must match the name of the CustomResourceDefinition-registration - // too: plural.group and it must be all lowercase. + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis///.../`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + // Must be all lowercase. optional string plural = 1; - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. // +optional optional string singular = 2; - // ShortNames are short names for the resource. It must be all lowercase. + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get `. + // It must be all lowercase. // +optional repeated string shortNames = 3; - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. optional string kind = 4; - // ListKind is the serialized kind of the list for this resource. Defaults to List. + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". // +optional optional string listKind = 5; - // Categories is a list of grouped resources custom resources belong to (e.g. 'all') + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. // +optional repeated string categories = 6; } // CustomResourceDefinitionSpec describes how a user wants their resource to appear message CustomResourceDefinitionSpec { - // Group is the group this resource belongs in + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). optional string group = 1; - // Version is the version this resource belongs in - // Should be always first item in Versions field if provided. - // Optional, but at least one of Version or Versions must be set. - // Deprecated: Please use `Versions`. + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis///...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. // +optional optional string version = 2; - // Names are the names used to describe this custom resource + // names specify the resource and kind names for the custom resource. optional CustomResourceDefinitionNames names = 3; - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. optional string scope = 4; - // Validation describes the validation methods for CustomResources - // Optional, the global validation schema for all versions. + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. 
// Top-level and per-version schemas are mutually exclusive. // +optional optional CustomResourceValidation validation = 5; - // Subresources describes the subresources for CustomResource - // Optional, the global subresources for all versions. + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. // Top-level and per-version subresources are mutually exclusive. // +optional optional CustomResourceSubresources subresources = 6; - // Versions is the list of all supported versions for this resource. - // If Version field is provided, this field is optional. - // Validation: All versions must use the same validation schema for now. i.e., top - // level Validation field is applied to all of these versions. - // Order: The version name will be used to compute the order. + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first @@ -238,92 +252,106 @@ message CustomResourceDefinitionSpec { // +optional repeated CustomResourceDefinitionVersion versions = 7; - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Optional, the global columns for all versions. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; - // `conversion` defines conversion settings for the CRD. + // conversion defines conversion settings for the CRD. // +optional optional CustomResourceConversion conversion = 9; + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. 
+ // +optional + optional bool preserveUnknownFields = 10; } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition message CustomResourceDefinitionStatus { - // Conditions indicate state for particular aspects of a CustomResourceDefinition + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional repeated CustomResourceDefinitionCondition conditions = 1; - // AcceptedNames are the names that are actually being used to serve discovery + // acceptedNames are the names that are actually being used to serve discovery. // They may be different than the names in spec. optional CustomResourceDefinitionNames acceptedNames = 2; - // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these // versions allows a migration path for stored versions in etcd. The field is mutable - // so the migration controller can first finish a migration to another version (i.e. - // that no old objects are left in the storage), and then remove the rest of the + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the // versions from this list. - // None of the versions in this list can be removed from the spec.Versions field. + // Versions may not be removed from `spec.versions` while they exist in this list. repeated string storedVersions = 3; } // CustomResourceDefinitionVersion describes a version for CRD. message CustomResourceDefinitionVersion { - // Name is the version name, e.g. “v1”, “v2beta1”, etc. + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. optional string name = 1; - // Served is a flag enabling/disabling this version from being served via REST APIs + // served is a flag enabling/disabling this version from being served via REST APIs optional bool served = 2; - // Storage flags the version as storage version. There must be exactly one - // flagged as storage version. + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. optional bool storage = 3; - // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // schema describes the schema used for validation and pruning of this version of the custom resource. // Top-level and per-version schemas are mutually exclusive. - // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). // +optional optional CustomResourceValidation schema = 4; - // Subresources describes the subresources for CustomResource + // subresources specify what subresources this version of the defined custom resource have. // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. 
+ // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). // +optional optional CustomResourceSubresources subresources = 5; - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an - // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must - // be explicitly set to null + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; } // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. message CustomResourceSubresourceScale { - // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .spec. - // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. optional string specReplicasPath = 1; - // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource // will default to 0. optional string statusReplicasPath = 2; - // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // Must be set to work with HPA. - // If there is no value under the given path in the CustomResource, the status label selector value in the /scale + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. 
+ // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` // subresource will default to the empty string. // +optional optional string labelSelectorPath = 3; @@ -339,18 +367,21 @@ message CustomResourceSubresourceStatus { // CustomResourceSubresources defines the status and scale subresources for CustomResources. message CustomResourceSubresources { - // Status denotes the status subresource for CustomResources + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. // +optional optional CustomResourceSubresourceStatus status = 1; - // Scale denotes the scale subresource for CustomResources + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. // +optional optional CustomResourceSubresourceScale scale = 2; } // CustomResourceValidation is a list of validation methods for CustomResources. message CustomResourceValidation { - // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. // +optional optional JSONSchemaProps openAPIV3Schema = 1; } @@ -384,6 +415,9 @@ message JSONSchemaProps { optional string title = 7; + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. optional JSON default = 8; optional double maximum = 9; @@ -443,6 +477,66 @@ message JSONSchemaProps { optional JSON example = 36; optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. 
If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; } // JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps @@ -470,24 +564,28 @@ message JSONSchemaPropsOrStringArray { // ServiceReference holds a reference to Service.legacy.k8s.io message ServiceReference { - // `namespace` is the namespace of the service. + // namespace is the namespace of the service. // Required optional string namespace = 1; - // `name` is the name of the service. + // name is the name of the service. // Required optional string name = 2; - // `path` is an optional URL path which will be sent in any request to - // this service. + // path is an optional URL path at which the webhook will be contacted. // +optional optional string path = 3; + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + optional int32 port = 4; } -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook. It has the same field as admissionregistration.v1beta1.WebhookClientConfig. +// WebhookClientConfig contains the information to make a TLS connection with the webhook. message WebhookClientConfig { - // `url` gives the location of the webhook, in standard URL form + // url gives the location of the webhook, in standard URL form // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // @@ -516,17 +614,15 @@ message WebhookClientConfig { // +optional optional string url = 3; - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. + // service is a reference to the service for this webhook. Either + // service or url must be specified. 
// // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional optional ServiceReference service = 1; - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. // If unspecified, system trust roots on the apiserver are used. // +optional optional bytes caBundle = 2; diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index 973ac0ee..f6c260b6 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -26,6 +26,11 @@ import ( type ConversionStrategyType string const ( + // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. + // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. + // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. + KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. NoneConverter ConversionStrategyType = "None" // WebhookConverter is a converter that calls to an external webhook to convert the CR. @@ -34,33 +39,36 @@ const ( // CustomResourceDefinitionSpec describes how a user wants their resource to appear type CustomResourceDefinitionSpec struct { - // Group is the group this resource belongs in + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). Group string `json:"group" protobuf:"bytes,1,opt,name=group"` - // Version is the version this resource belongs in - // Should be always first item in Versions field if provided. - // Optional, but at least one of Version or Versions must be set. - // Deprecated: Please use `Versions`. + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis///...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. // +optional Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` - // Names are the names used to describe this custom resource + // names specify the resource and kind names for the custom resource. Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. 
Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` - // Validation describes the validation methods for CustomResources - // Optional, the global validation schema for all versions. + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. // Top-level and per-version schemas are mutually exclusive. // +optional Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` - // Subresources describes the subresources for CustomResource - // Optional, the global subresources for all versions. + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. // Top-level and per-version subresources are mutually exclusive. // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` - // Versions is the list of all supported versions for this resource. - // If Version field is provided, this field is optional. - // Validation: All versions must use the same validation schema for now. i.e., top - // level Validation field is applied to all of these versions. - // Order: The version name will be used to compute the order. + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first @@ -69,44 +77,57 @@ type CustomResourceDefinitionSpec struct { // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. // +optional Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Optional, the global columns for all versions. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` - // `conversion` defines conversion settings for the CRD. + // conversion defines conversion settings for the CRD. // +optional Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. 
+ // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional + PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` } // CustomResourceConversion describes how to convert different versions of a CR. type CustomResourceConversion struct { - // `strategy` specifies the conversion strategy. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` - // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. This field is - // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. // +optional WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` - // ConversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. API server will try to use first version in + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in // the list which it supports. If none of the versions specified in this list - // supported by API server, conversion will fail for this object. + // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. - // Default to `['v1beta1']`. + // Defaults to `["v1beta1"]`. // +optional ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` } -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook. It has the same field as admissionregistration.v1beta1.WebhookClientConfig. +// WebhookClientConfig contains the information to make a TLS connection with the webhook. 
type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form + // url gives the location of the webhook, in standard URL form // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // @@ -135,17 +156,15 @@ type WebhookClientConfig struct { // +optional URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. + // service is a reference to the service for this webhook. Either + // service or url must be specified. // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. // If unspecified, system trust roots on the apiserver are used. // +optional CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` @@ -153,47 +172,49 @@ type WebhookClientConfig struct { // ServiceReference holds a reference to Service.legacy.k8s.io type ServiceReference struct { - // `namespace` is the namespace of the service. + // namespace is the namespace of the service. // Required Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // `name` is the name of the service. + // name is the name of the service. // Required Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // `path` is an optional URL path which will be sent in any request to - // this service. + // path is an optional URL path at which the webhook will be contacted. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } // CustomResourceDefinitionVersion describes a version for CRD. type CustomResourceDefinitionVersion struct { - // Name is the version name, e.g. “v1”, “v2beta1”, etc. + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Served is a flag enabling/disabling this version from being served via REST APIs + // served is a flag enabling/disabling this version from being served via REST APIs Served bool `json:"served" protobuf:"varint,2,opt,name=served"` - // Storage flags the version as storage version. There must be exactly one - // flagged as storage version. + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` - // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // schema describes the schema used for validation and pruning of this version of the custom resource. // Top-level and per-version schemas are mutually exclusive. 
- // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). // +optional Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` - // Subresources describes the subresources for CustomResource + // subresources specify what subresources this version of the defined custom resource have. // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an - // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must - // be explicitly set to null + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` } @@ -203,11 +224,11 @@ type CustomResourceColumnDefinition struct { // name is a human readable name for the column. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. Type string `json:"type" protobuf:"bytes,2,opt,name=type"` // format is an optional OpenAPI type definition for this column. The 'name' format is applied // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. // +optional Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` // description is a human readable description of this column. 
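For orientation while reading the per-version fields above, here is a minimal Go sketch using the vendored v1beta1 types; it is illustrative only and not part of this diff, and the column name and JSONPath are hypothetical.

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	// Exactly one version in spec.versions must set Storage to true.
	// Per-version Schema and AdditionalPrinterColumns are mutually exclusive
	// with the corresponding top-level fields in CustomResourceDefinitionSpec.
	version := apiextensionsv1beta1.CustomResourceDefinitionVersion{
		Name:    "v1",
		Served:  true,
		Storage: true,
		Schema: &apiextensionsv1beta1.CustomResourceValidation{
			OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{Type: "object"},
		},
		AdditionalPrinterColumns: []apiextensionsv1beta1.CustomResourceColumnDefinition{
			// Hypothetical column: surfaces .status.state next to the name in kubectl output.
			{Name: "State", Type: "string", JSONPath: ".status.state"},
		},
	}
	fmt.Printf("version %q served=%v storage=%v\n", version.Name, version.Served, version.Storage)
}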
@@ -215,31 +236,38 @@ type CustomResourceColumnDefinition struct { Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` // priority is an integer defining the relative importance of this column compared to others. Lower // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. + // should be given a priority greater than 0. // +optional Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` - - // JSONPath is a simple JSON path, i.e. with array notation. + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. JSONPath string `json:"JSONPath" protobuf:"bytes,6,opt,name=JSONPath"` } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition type CustomResourceDefinitionNames struct { - // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration - // too: plural.group and it must be all lowercase. + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis///.../`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + // Must be all lowercase. Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. // +optional Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` - // ShortNames are short names for the resource. It must be all lowercase. + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get `. + // It must be all lowercase. // +optional ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` - // ListKind is the serialized kind of the list for this resource. Defaults to List. + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". // +optional ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` - // Categories is a list of grouped resources custom resources belong to (e.g. 'all') + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. // +optional Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` } @@ -275,43 +303,65 @@ const ( // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in // the group and are therefore accepted. NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. 
+ // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" ) // CustomResourceDefinitionCondition contains details for the current condition of this pod. type CustomResourceDefinitionCondition struct { - // Type is the type of the condition. + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time the condition transitioned from one status to another. + // lastTransitionTime last time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Unique, one-word, CamelCase reason for the condition's last transition. + // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Human-readable message indicating details about last transition. + // message is a human-readable message indicating details about last transition. // +optional Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition type CustomResourceDefinitionStatus struct { - // Conditions indicate state for particular aspects of a CustomResourceDefinition + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` - // AcceptedNames are the names that are actually being used to serve discovery + // acceptedNames are the names that are actually being used to serve discovery. // They may be different than the names in spec. 
AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` - // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these // versions allows a migration path for stored versions in etcd. The field is mutable - // so the migration controller can first finish a migration to another version (i.e. - // that no old objects are left in the storage), and then remove the rest of the + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the // versions from this list. - // None of the versions in this list can be removed from the spec.Versions field. + // Versions may not be removed from `spec.versions` while they exist in this list. StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` } @@ -325,13 +375,14 @@ const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s. // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.19. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. type CustomResourceDefinition struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec describes how the user wants the resources to appear + // spec describes how the user wants the resources to appear Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status indicates the actual state of the CustomResourceDefinition + // status indicates the actual state of the CustomResourceDefinition // +optional Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -343,23 +394,26 @@ type CustomResourceDefinitionList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items individual CustomResourceDefinitions + // items list individual CustomResourceDefinition objects Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` } // CustomResourceValidation is a list of validation methods for CustomResources. type CustomResourceValidation struct { - // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. // +optional OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` } // CustomResourceSubresources defines the status and scale subresources for CustomResources. type CustomResourceSubresources struct { - // Status denotes the status subresource for CustomResources + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. 
// +optional Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` - // Scale denotes the scale subresource for CustomResources + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. // +optional Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` } @@ -373,22 +427,25 @@ type CustomResourceSubresourceStatus struct{} // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. type CustomResourceSubresourceScale struct { - // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .spec. - // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` - // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource // will default to 0. StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` - // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // Must be set to work with HPA. - // If there is no value under the given path in the CustomResource, the status label selector value in the /scale + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` // subresource will default to the empty string. // +optional LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` @@ -399,38 +456,39 @@ type CustomResourceSubresourceScale struct { // ConversionReview describes a conversion request/response. type ConversionReview struct { metav1.TypeMeta `json:",inline"` - // `request` describes the attributes for the conversion request. 
+ // request describes the attributes for the conversion request. // +optional Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // `response` describes the attributes for the conversion response. + // response describes the attributes for the conversion response. // +optional Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` } // ConversionRequest describes the conversion request parameters. type ConversionRequest struct { - // `uid` is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // `desiredAPIVersion` is the version to convert given objects to. e.g. "myapi.example.com/v1" + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` - // `objects` is the list of CR objects to be converted. + // objects is the list of custom resource objects to be converted. Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` } // ConversionResponse describes a conversion response. type ConversionResponse struct { - // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. - // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list - // must also has the same size as input list with the same objects in the same order(i.e. equal UIDs and object meta) + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` - // `result` contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. 
The `result.status` field is required and represent the success or failure of the + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 54c0a4ae..d71a5a02 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -18,13 +18,16 @@ package v1beta1 // JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). type JSONSchemaProps struct { - ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` - Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` - Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` - Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` - Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` + ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` + Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` + Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` + Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` @@ -55,6 +58,66 @@ type JSONSchemaProps struct { ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. 
+ XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` } // JSON represents any valid JSON value. 
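As context for the x-kubernetes-* extension fields added in the hunk above, here is a minimal Go sketch (illustrative only, not part of the vendored files) of how these new JSONSchemaProps fields might be populated when building a v1beta1 CRD validation schema programmatically; the property names and the exampleSchema helper are hypothetical.

package example

import (
	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

// exampleSchema is a hypothetical helper showing the new extension fields.
func exampleSchema() *apiextv1beta1.JSONSchemaProps {
	preserve := true  // x-kubernetes-preserve-unknown-fields: may be true or unset; false is forbidden
	listType := "map" // x-kubernetes-list-type: map
	return &apiextv1beta1.JSONSchemaProps{
		Type: "object",
		// Stop the API server from pruning unknown fields under this object.
		XPreserveUnknownFields: &preserve,
		Properties: map[string]apiextv1beta1.JSONSchemaProps{
			// Accepts either an integer or a string; an empty type is allowed here.
			"port": {XIntOrString: true},
			// An associative list keyed by "name"; order is preserved on merge.
			"containers": {
				Type:         "array",
				XListType:    &listType,
				XListMapKeys: []string{"name"},
				Items: &apiextv1beta1.JSONSchemaPropsOrArray{
					Schema: &apiextv1beta1.JSONSchemaProps{Type: "object"},
				},
			},
		},
	}
}

A schema like this would then be placed under the CRD's spec.validation.openAPIV3Schema (or per-version schema) as usual; only the x-kubernetes-* fields are new in this vendored update.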
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index e7ae745b..64073df0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -24,6 +24,7 @@ import ( unsafe "unsafe" apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -295,7 +296,15 @@ func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResou func autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) - out.WebhookClientConfig = (*apiextensions.WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(apiextensions.WebhookClientConfig) + if err := Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(*in, *out, s); err != nil { + return err + } + } else { + out.WebhookClientConfig = nil + } out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) return nil } @@ -307,7 +316,15 @@ func Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceCon func autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { out.Strategy = ConversionStrategyType(in.Strategy) - out.WebhookClientConfig = (*WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(WebhookClientConfig) + if err := Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(*in, *out, s); err != nil { + return err + } + } else { + out.WebhookClientConfig = nil + } out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) return nil } @@ -478,7 +495,16 @@ func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomRes out.Versions = nil } out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - out.Conversion = (*apiextensions.CustomResourceConversion)(unsafe.Pointer(in.Conversion)) + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(apiextensions.CustomResourceConversion) + if err := Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) return nil } @@ -516,7 +542,16 @@ func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomRes out.Versions = nil } out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - out.Conversion = (*CustomResourceConversion)(unsafe.Pointer(in.Conversion)) + if in.Conversion != nil { + in, out := &in.Conversion, 
&out.Conversion + *out = new(CustomResourceConversion) + if err := Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) return nil } @@ -905,6 +940,11 @@ func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JS out.Example = nil } out.Nullable = in.Nullable + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) return nil } @@ -1087,6 +1127,11 @@ func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *ap } else { out.Example = nil } + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) return nil } @@ -1228,6 +1273,9 @@ func autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in * out.Namespace = in.Namespace out.Name = in.Name out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } return nil } @@ -1240,6 +1288,9 @@ func autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in * out.Namespace = in.Namespace out.Name = in.Name out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } return nil } @@ -1250,7 +1301,15 @@ func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apie func autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*apiextensions.ServiceReference)(unsafe.Pointer(in.Service)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiextensions.ServiceReference) + if err := Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) return nil } @@ -1262,7 +1321,15 @@ func Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in func autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*ServiceReference)(unsafe.Pointer(in.Service)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 20ded01b..82bbb2be 100644 --- 
a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -197,7 +197,7 @@ func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitio func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomResourceDefinition, len(*in)) @@ -283,6 +283,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = new(CustomResourceConversion) (*in).DeepCopyInto(*out) } + if in.PreserveUnknownFields != nil { + in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields + *out = new(bool) + **out = **in + } return } @@ -607,6 +612,11 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go index f65f47a0..e1807243 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go @@ -38,6 +38,13 @@ func RegisterDefaults(scheme *runtime.Scheme) error { func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { SetDefaults_CustomResourceDefinition(in) SetDefaults_CustomResourceDefinitionSpec(&in.Spec) + if in.Spec.Conversion != nil { + if in.Spec.Conversion.WebhookClientConfig != nil { + if in.Spec.Conversion.WebhookClientConfig.Service != nil { + SetDefaults_ServiceReference(in.Spec.Conversion.WebhookClientConfig.Service) + } + } + } } func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index 667e556a..682e6fd4 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -115,7 +115,7 @@ func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitio func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomResourceDefinition, len(*in)) @@ -201,6 +201,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = new(CustomResourceConversion) (*in).DeepCopyInto(*out) } + if in.PreserveUnknownFields != nil { + in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go index aa2dbcbb..de6ad042 100644 --- 
a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -19,6 +19,9 @@ limitations under the License. package clientset import ( + "fmt" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" @@ -28,6 +31,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface + ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -35,6 +39,7 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client + apiextensionsV1 *apiextensionsv1.ApiextensionsV1Client } // ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client @@ -42,6 +47,11 @@ func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1b return c.apiextensionsV1beta1 } +// ApiextensionsV1 retrieves the ApiextensionsV1Client +func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface { + return c.apiextensionsV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -51,9 +61,14 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset @@ -62,6 +77,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.apiextensionsV1, err = apiextensionsv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -75,6 +94,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.apiextensionsV1beta1 = apiextensionsv1beta1.NewForConfigOrDie(c) + cs.apiextensionsV1 = apiextensionsv1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -84,6 +104,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.apiextensionsV1beta1 = apiextensionsv1beta1.New(c) + cs.apiextensionsV1 = apiextensionsv1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go index 4232ce5c..144c2066 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go @@ -19,6 +19,7 @@ limitations under the License. package scheme import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -32,6 +33,7 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ apiextensionsv1beta1.AddToScheme, + apiextensionsv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go new file mode 100644 index 00000000..8823cb6a --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. 
DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1Client for the given config. +func NewForConfig(c *rest.Config) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ApiextensionsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1Client for the given RESTClient. +func New(c rest.Interface) *ApiextensionsV1Client { + return &ApiextensionsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiextensionsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go new file mode 100644 index 00000000..e3fa4369 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. 
+// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. +type CustomResourceDefinitionInterface interface { + Create(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) + Update(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) + UpdateStatus(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.CustomResourceDefinition, error) + List(opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *customResourceDefinitions) List(opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Create(customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). + Body(customResourceDefinition). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Update(customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + Body(customResourceDefinition). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *customResourceDefinitions) UpdateStatus(customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + SubResource("status"). + Body(customResourceDefinition). + Do(). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *customResourceDefinitions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go new file mode 100644 index 00000000..3af5d054 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go new file mode 100644 index 00000000..e594636a --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go index a1fd337f..ff1ec4f2 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 00000000..f02fa8e4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,49 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. 
+// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index afd97f7a..95d5c7a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -32,7 +32,9 @@ import ( const ( // StatusTooManyRequests means the server experienced too many requests within a // given window and that the client must wait to perform the action again. - StatusTooManyRequests = 429 + // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in + // the future version. + StatusTooManyRequests = http.StatusTooManyRequests ) // StatusError is an error intended for consumption by a REST API server; it can also be @@ -349,7 +351,7 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { func NewTooManyRequestsError(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusTooManyRequests, + Code: http.StatusTooManyRequests, Reason: metav1.StatusReasonTooManyRequests, Message: fmt.Sprintf("Too many requests: %s", message), }} @@ -394,7 +396,11 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr case http.StatusNotAcceptable: reason = metav1.StatusReasonNotAcceptable // the server message has details about what types are acceptable - message = serverMessage + if len(serverMessage) == 0 || serverMessage == "unknown" { + message = "the server was unable to respond with a content type that the client supports" + } else { + message = serverMessage + } case http.StatusUnsupportedMediaType: reason = metav1.StatusReasonUnsupportedMediaType // the server message has details about what types are acceptable @@ -617,3 +623,46 @@ func ReasonForError(err error) metav1.StatusReason { } return metav1.StatusReasonUnknown } + +// ErrorReporter converts generic errors into runtime.Object errors without +// requiring the caller to take a dependency on meta/v1 (where Status lives). +// This prevents circular dependencies in core watch code. +type ErrorReporter struct { + code int + verb string + reason string +} + +// NewClientErrorReporter will respond with valid v1.Status objects that report +// unexpected server responses. Primarily used by watch to report errors when +// we attempt to decode a response from the server and it is not in the form +// we expect. Because watch is a dependency of the core api, we can't return +// meta/v1.Status in that package and so much inject this interface to convert a +// generic error as appropriate. The reason is passed as a unique status cause +// on the returned status, otherwise the generic "ClientError" is returned. 
+func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter { + return &ErrorReporter{ + code: code, + verb: verb, + reason: reason, + } +} + +// AsObject returns a valid error runtime.Object (a v1.Status) for the given +// error, using the code and verb of the reporter type. The error is set to +// indicate that this was an unexpected server response. +func (r *ErrorReporter) AsObject(err error) runtime.Object { + status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true) + if status.ErrStatus.Details == nil { + status.ErrStatus.Details = &metav1.StatusDetails{} + } + reason := r.reason + if len(reason) == 0 { + reason = "ClientError" + } + status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: metav1.CauseType(reason), + Message: err.Error(), + }) + return &status.ErrStatus +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 3425055f..50468b53 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -17,30 +17,76 @@ limitations under the License. package meta import ( + "errors" "fmt" "reflect" + "sync" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" ) -// IsListType returns true if the provided Object has a slice called Items +var ( + // isListCache maintains a cache of types that are checked for lists + // which is used by IsListType. + // TODO: remove and replace with an interface check + isListCache = struct { + lock sync.RWMutex + byType map[reflect.Type]bool + }{ + byType: make(map[reflect.Type]bool, 1024), + } +) + +// IsListType returns true if the provided Object has a slice called Items. +// TODO: Replace the code in this check with an interface comparison by +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { - // if we're a runtime.Unstructured, check whether this is a list. - // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object - if unstructured, ok := obj.(runtime.Unstructured); ok { - return unstructured.IsList() + switch t := obj.(type) { + case runtime.Unstructured: + return t.IsList() } + t := reflect.TypeOf(obj) - _, err := GetItemsPtr(obj) - return err == nil + isListCache.lock.RLock() + ok, exists := isListCache.byType[t] + isListCache.lock.RUnlock() + + if !exists { + _, err := getItemsPtr(obj) + ok = err == nil + + // cache only the first 1024 types + isListCache.lock.Lock() + if len(isListCache.byType) < 1024 { + isListCache.byType[t] = ok + } + isListCache.lock.Unlock() + } + + return ok } +var ( + errExpectFieldItems = errors.New("no Items field in this object") + errExpectSliceItems = errors.New("Items field must be a slice of objects") +) + // GetItemsPtr returns a pointer to the list object's Items member. // If 'list' doesn't have an Items member, it's not really a list type // and an error will be returned. // This function will either return a pointer to a slice, or an error, but not both. +// TODO: this will be replaced with an interface in the future func GetItemsPtr(list runtime.Object) (interface{}, error) { + obj, err := getItemsPtr(list) + if err != nil { + return nil, fmt.Errorf("%T is not a list: %v", list, err) + } + return obj, nil +} + +// getItemsPtr returns a pointer to the list object's Items member or an error. 
+func getItemsPtr(list runtime.Object) (interface{}, error) { v, err := conversion.EnforcePtr(list) if err != nil { return nil, err @@ -48,19 +94,19 @@ func GetItemsPtr(list runtime.Object) (interface{}, error) { items := v.FieldByName("Items") if !items.IsValid() { - return nil, fmt.Errorf("no Items field in %#v", list) + return nil, errExpectFieldItems } switch items.Kind() { case reflect.Interface, reflect.Ptr: target := reflect.TypeOf(items.Interface()).Elem() if target.Kind() != reflect.Slice { - return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) + return nil, errExpectSliceItems } return items.Interface(), nil case reflect.Slice: return items.Addr().Interface(), nil default: - return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) + return nil, errExpectSliceItems } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index b50337e1..fa4b7673 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -21,7 +21,6 @@ import ( "reflect" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -114,12 +113,12 @@ func Accessor(obj interface{}) (metav1.Object, error) { // AsPartialObjectMetadata takes the metav1 interface and returns a partial object. // TODO: consider making this solely a conversion action. -func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { +func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { switch t := m.(type) { case *metav1.ObjectMeta: - return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} + return &metav1.PartialObjectMetadata{ObjectMeta: *t} default: - return &metav1beta1.PartialObjectMetadata{ + return &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: m.GetName(), GenerateName: m.GetGenerateName(), @@ -136,7 +135,6 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata OwnerReferences: m.GetOwnerReferences(), Finalizers: m.GetFinalizers(), ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), ManagedFields: m.GetManagedFields(), }, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 9d7835bc..9fca2e16 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -17,20 +17,15 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -/* -Package resource is a generated protocol buffer package. +package resource -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +import ( + fmt "fmt" -It has these top-level messages: - Quantity -*/ -package resource + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,19 +38,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} -func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{0} +} +func (m *Quantity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantity.Unmarshal(m, b) +} +func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantity.Marshal(b, m, deterministic) +} +func (m *Quantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantity.Merge(m, src) +} +func (m *Quantity) XXX_Size() int { + return xxx_messageInfo_Quantity.Size(m) +} +func (m *Quantity) XXX_DiscardUnknown() { + xxx_messageInfo_Quantity.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantity proto.InternalMessageInfo func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_612bba87bd70906c = []byte{ // 237 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index acc90444..18a6c7cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -26,7 +26,7 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 72d3880c..7f63175d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -194,9 +194,9 @@ func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { } if fraction { if base > 0 { - value += 1 + value++ } else { - value += -1 + value-- } } return value, !fraction diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 54fda580..516d041d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // @@ -584,6 +584,12 @@ func (q *Quantity) Neg() { q.d.Dec.Neg(q.d.Dec) } +// Equal checks equality of two Quantities. 
This is useful for testing with +// cmp.Equal. +func (q Quantity) Equal(v Quantity) bool { + return q.Cmp(v) == 0 +} + // int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation // of most Quantity values. const int64QuantityExpectedBytes = 18 @@ -691,7 +697,9 @@ func (q *Quantity) MilliValue() int64 { return q.ScaledValue(Milli) } -// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. +// ScaledValue returns the value of ceil(q / 10^scale). +// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. +// This could overflow an int64. // To detect overflow, call Value() first and verify the expected magnitude. func (q *Quantity) ScaledValue(scale Scale) int64 { if q.d.Dec == nil { @@ -718,21 +726,3 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } - -// Copy is a convenience function that makes a deep copy for you. Non-deep -// copies of quantities share pointers and you will regret that. -func (q *Quantity) Copy() *Quantity { - if q.d.Dec == nil { - return &Quantity{ - s: q.s, - i: q.i, - Format: q.Format, - } - } - tmp := &inf.Dec{} - return &Quantity{ - s: q.s, - d: infDecAmount{tmp.Set(q.d.Dec)}, - Format: q.Format, - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index 74dfb4e4..f89ca163 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -19,6 +19,7 @@ package resource import ( "fmt" "io" + "math/bits" "github.com/gogo/protobuf/proto" ) @@ -28,7 +29,7 @@ var _ proto.Sizer = &Quantity{} func (m *Quantity) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) - n, err := m.MarshalTo(data) + n, err := m.MarshalToSizedBuffer(data[:size]) if err != nil { return nil, err } @@ -38,30 +39,40 @@ func (m *Quantity) Marshal() (data []byte, err error) { // MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct // with a single string field. func (m *Quantity) MarshalTo(data []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(data[:size]) +} + +// MarshalToSizedBuffer is a customized version of the generated +// Protobuf unmarshaler for a struct with a single string field. 
+func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) { + i := len(data) _ = i var l int _ = l - data[i] = 0xa - i++ // BEGIN CUSTOM MARSHAL out := m.String() + i -= len(out) + copy(data[i:], out) i = encodeVarintGenerated(data, i, uint64(len(out))) - i += copy(data[i:], out) // END CUSTOM MARSHAL + i-- + data[i] = 0xa - return i, nil + return len(data) - i, nil } func encodeVarintGenerated(data []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } data[offset] = uint8(v) - return offset + 1 + return base } func (m *Quantity) Size() (n int) { @@ -77,14 +88,7 @@ func (m *Quantity) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (bits.Len64(x|1) + 6) / 7 } // Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go deleted file mode 100644 index bdb71340..00000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" -) - -func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *metav1.ListOptions, s conversion.Scope) error { - if err := metav1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - out.Watch = in.Watch - out.Limit = in.Limit - out.Continue = in.Continue - return nil -} - -func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOptions, out *ListOptions, s conversion.Scope) error { - if err := metav1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - out.Watch = in.Watch - out.Limit = in.Limit - out.Continue = in.Continue - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go index 46b8605f..3fea2c38 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -66,9 +66,6 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) metav1.Convert_Map_string_To_string_To_v1_LabelSelector, metav1.Convert_v1_LabelSelector_To_Map_string_To_string, - - Convert_internalversion_ListOptions_To_v1_ListOptions, - Convert_v1_ListOptions_To_internalversion_ListOptions, ) if err != nil { return err @@ -89,12 +86,12 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &metav1beta1.PartialObjectMetadata{}, &metav1beta1.PartialObjectMetadataList{}, ) - scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, - &metav1beta1.Table{}, - &metav1beta1.TableOptions{}, - &metav1beta1.PartialObjectMetadata{}, - &metav1beta1.PartialObjectMetadataList{}, - ) + if err := metav1beta1.AddMetaToScheme(scheme); err != nil { + return err + } + if err := metav1.AddMetaToScheme(scheme); err != nil { + return err + } // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this) scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{}, diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index e434e505..8d254416 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -35,6 +35,15 @@ type ListOptions struct { FieldSelector fields.Selector // If true, watch for changes to this list Watch bool + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. 
+ // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + AllowWatchBookmarks bool // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 79b75673..35adbca1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -55,16 +55,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope) - }); err != nil { - return err - } return nil } @@ -118,6 +108,7 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, return err } out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit @@ -125,6 +116,11 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, return nil } +// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s) +} + func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err @@ -133,9 +129,15 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption return err } out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue return nil } + +// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function. 
+func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 81d85e96..d5e4fc68 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -28,7 +28,7 @@ import ( func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.Object, len(*in)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index 5c36f82c..d07069ef 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -77,6 +77,8 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_Slice_string_To_Slice_int32, Convert_Slice_string_To_v1_DeletionPropagation, + + Convert_Slice_string_To_v1_IncludeObjectPolicy, ) } @@ -317,3 +319,11 @@ func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *Deleti } return nil } + +// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*input) > 0 { + *out = IncludeObjectPolicy((*input)[0]) + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go new file mode 100644 index 00000000..8751d052 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func (in *TableRow) DeepCopy() *TableRow { + if in == nil { + return nil + } + + out := new(TableRow) + + if in.Cells != nil { + out.Cells = make([]interface{}, len(in.Cells)) + for i := range in.Cells { + out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + } + } + + if in.Conditions != nil { + out.Conditions = make([]TableRowCondition, len(in.Conditions)) + for i := range in.Conditions { + in.Conditions[i].DeepCopyInto(&out.Conditions[i]) + } + } + + in.Object.DeepCopyInto(&out.Object) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 69e65099..31b1d955 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -17,73 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - CreateOptions - DeleteOptions - Duration - ExportOptions - Fields - GetOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - Initializer - Initializers - LabelSelector - LabelSelectorRequirement - List - ListMeta - ListOptions - ManagedFieldsEntry - MicroTime - ObjectMeta - OwnerReference - Patch - PatchOptions - Preconditions - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta - UpdateOptions - Verbs - WatchEvent -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import time "time" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -97,179 +49,1199 @@ var _ = time.Kitchen // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{0} +} +func (m *APIGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroup.Merge(m, src) +} +func (m *APIGroup) XXX_Size() int { + return m.Size() +} +func (m *APIGroup) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroup proto.InternalMessageInfo + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{1} +} +func (m *APIGroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupList.Merge(m, src) +} +func (m *APIGroupList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupList proto.InternalMessageInfo + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{2} +} +func (m *APIResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResource.Merge(m, src) +} +func (m *APIResource) XXX_Size() int { + return m.Size() +} +func (m *APIResource) XXX_DiscardUnknown() { + xxx_messageInfo_APIResource.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResource proto.InternalMessageInfo + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{3} +} +func (m *APIResourceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceList.Merge(m, src) +} +func (m *APIResourceList) XXX_Size() int { + return m.Size() +} +func (m *APIResourceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceList proto.InternalMessageInfo + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func 
(*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{4} +} +func (m *APIVersions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersions.Merge(m, src) +} +func (m *APIVersions) XXX_Size() int { + return m.Size() +} +func (m *APIVersions) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersions.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersions proto.InternalMessageInfo -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{5} +} +func (m *CreateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CreateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateOptions.Merge(m, src) +} +func (m *CreateOptions) XXX_Size() int { + return m.Size() +} +func (m *CreateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CreateOptions.DiscardUnknown(m) +} -func (m *APIResource) Reset() { *m = APIResource{} } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_CreateOptions proto.InternalMessageInfo -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{6} +} +func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeleteOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteOptions.Merge(m, src) +} +func (m *DeleteOptions) XXX_Size() int { + return m.Size() +} +func (m *DeleteOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteOptions.DiscardUnknown(m) +} -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func (*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{7} +} +func (m 
*Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{8} +} +func (m *ExportOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExportOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportOptions.Merge(m, src) +} +func (m *ExportOptions) XXX_Size() int { + return m.Size() +} +func (m *ExportOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExportOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportOptions proto.InternalMessageInfo -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *FieldsV1) Reset() { *m = FieldsV1{} } +func (*FieldsV1) ProtoMessage() {} +func (*FieldsV1) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{9} +} +func (m *FieldsV1) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldsV1) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldsV1.Merge(m, src) +} +func (m *FieldsV1) XXX_Size() int { + return m.Size() +} +func (m *FieldsV1) XXX_DiscardUnknown() { + xxx_messageInfo_FieldsV1.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{10} +} +func (m *GetOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOptions.Merge(m, src) +} +func (m *GetOptions) XXX_Size() int { + return m.Size() +} +func (m *GetOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GetOptions.DiscardUnknown(m) +} -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_GetOptions proto.InternalMessageInfo -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *GroupKind) Reset() { 
*m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{11} +} +func (m *GroupKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupKind.Merge(m, src) +} +func (m *GroupKind) XXX_Size() int { + return m.Size() +} +func (m *GroupKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupKind.DiscardUnknown(m) +} -func (m *Fields) Reset() { *m = Fields{} } -func (*Fields) ProtoMessage() {} -func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_GroupKind proto.InternalMessageInfo -func (m *GetOptions) Reset() { *m = GetOptions{} } -func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{12} +} +func (m *GroupResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResource.Merge(m, src) +} +func (m *GroupResource) XXX_Size() int { + return m.Size() +} +func (m *GroupResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResource.DiscardUnknown(m) +} -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_GroupResource proto.InternalMessageInfo -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{13} +} +func (m *GroupVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersion.Merge(m, src) +} +func (m *GroupVersion) XXX_Size() int { + return m.Size() +} +func (m *GroupVersion) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersion.DiscardUnknown(m) +} -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return 
fileDescriptor_cf52fa777ced5367, []int{14} +} +func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src) +} +func (m *GroupVersionForDiscovery) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m) } -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{15} +} +func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionKind.Merge(m, src) +} +func (m *GroupVersionKind) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionKind.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{16} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} -func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo -func (m *Initializers) Reset() { *m = Initializers{} } -func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{17} +} +func 
(m *LabelSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelector.Merge(m, src) +} +func (m *LabelSelector) XXX_Size() int { + return m.Size() +} +func (m *LabelSelector) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelector.DiscardUnknown(m) +} -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_cf52fa777ced5367, []int{18} +} +func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorRequirement.Merge(m, src) +} +func (m *LabelSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{19} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} + +var xxx_messageInfo_List proto.InternalMessageInfo + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{20} +} +func (m *ListMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMeta.Merge(m, src) +} +func (m *ListMeta) XXX_Size() int { + return m.Size() +} +func (m *ListMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ListMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMeta proto.InternalMessageInfo + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{21} +} +func (m *ListOptions) XXX_Unmarshal(b []byte) error { 
+ return m.Unmarshal(b) +} +func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOptions.Merge(m, src) } +func (m *ListOptions) XXX_Size() int { + return m.Size() +} +func (m *ListOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ListOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOptions proto.InternalMessageInfo + +func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } +func (*ManagedFieldsEntry) ProtoMessage() {} +func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{22} +} +func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedFieldsEntry.Merge(m, src) +} +func (m *ManagedFieldsEntry) XXX_Size() int { + return m.Size() +} +func (m *ManagedFieldsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{23} +} +func (m *MicroTime) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MicroTime.Unmarshal(m, b) +} +func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic) +} +func (m *MicroTime) XXX_Merge(src proto.Message) { + xxx_messageInfo_MicroTime.Merge(m, src) +} +func (m *MicroTime) XXX_Size() int { + return xxx_messageInfo_MicroTime.Size(m) +} +func (m *MicroTime) XXX_DiscardUnknown() { + xxx_messageInfo_MicroTime.DiscardUnknown(m) +} + +var xxx_messageInfo_MicroTime proto.InternalMessageInfo -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{24} +} +func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMeta.Merge(m, src) +} +func (m *ObjectMeta) XXX_Size() int { + return m.Size() +} +func (m *ObjectMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, 
[]int{25} +} +func (m *OwnerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OwnerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerReference.Merge(m, src) +} +func (m *OwnerReference) XXX_Size() int { + return m.Size() +} +func (m *OwnerReference) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_OwnerReference proto.InternalMessageInfo + +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{26} +} +func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadata.Merge(m, src) +} +func (m *PartialObjectMetadata) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{27} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo -func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } -func (*ManagedFieldsEntry) ProtoMessage() {} -func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{28} +} +func (m *Patch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Patch) XXX_Merge(src proto.Message) { + xxx_messageInfo_Patch.Merge(m, src) +} +func (m *Patch) XXX_Size() int { + return m.Size() +} +func (m *Patch) 
XXX_DiscardUnknown() { + xxx_messageInfo_Patch.DiscardUnknown(m) +} -func (m *MicroTime) Reset() { *m = MicroTime{} } -func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +var xxx_messageInfo_Patch proto.InternalMessageInfo -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *PatchOptions) Reset() { *m = PatchOptions{} } +func (*PatchOptions) ProtoMessage() {} +func (*PatchOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{29} +} +func (m *PatchOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PatchOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatchOptions.Merge(m, src) +} +func (m *PatchOptions) XXX_Size() int { + return m.Size() +} +func (m *PatchOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PatchOptions.DiscardUnknown(m) +} -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +var xxx_messageInfo_PatchOptions proto.InternalMessageInfo -func (m *Patch) Reset() { *m = Patch{} } -func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{30} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) +} -func (m *PatchOptions) Reset() { *m = PatchOptions{} } -func (*PatchOptions) ProtoMessage() {} -func (*PatchOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +var xxx_messageInfo_Preconditions proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{31} +} +func (m *RootPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RootPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_RootPaths.Merge(m, src) +} +func (m *RootPaths) XXX_Size() int { + return m.Size() +} +func (m *RootPaths) XXX_DiscardUnknown() { + 
xxx_messageInfo_RootPaths.DiscardUnknown(m) +} -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} + return fileDescriptor_cf52fa777ced5367, []int{32} +} +func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src) +} +func (m *ServerAddressByClientCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{33} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{34} +} +func (m *StatusCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusCause.Merge(m, src) +} +func (m *StatusCause) XXX_Size() int { + return m.Size() +} +func (m *StatusCause) XXX_DiscardUnknown() { + xxx_messageInfo_StatusCause.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusCause proto.InternalMessageInfo + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{35} +} +func (m *StatusDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusDetails.Merge(m, src) +} +func (m *StatusDetails) XXX_Size() int { + return m.Size() +} +func (m *StatusDetails) 
XXX_DiscardUnknown() { + xxx_messageInfo_StatusDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusDetails proto.InternalMessageInfo + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{36} +} +func (m *TableOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TableOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableOptions.Merge(m, src) +} +func (m *TableOptions) XXX_Size() int { + return m.Size() +} +func (m *TableOptions) XXX_DiscardUnknown() { + xxx_messageInfo_TableOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_TableOptions proto.InternalMessageInfo + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{37} +} +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{38} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{39} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} -func (m *StatusDetails) Reset() { *m = 
StatusDetails{} } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo -func (m *Time) Reset() { *m = Time{} } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{40} +} +func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpdateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateOptions.Merge(m, src) +} +func (m *UpdateOptions) XXX_Size() int { + return m.Size() +} +func (m *UpdateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateOptions.DiscardUnknown(m) +} -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{41} +} +func (m *Verbs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Verbs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Verbs.Merge(m, src) +} +func (m *Verbs) XXX_Size() int { + return m.Size() +} +func (m *Verbs) XXX_DiscardUnknown() { + xxx_messageInfo_Verbs.DiscardUnknown(m) +} -func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } -func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +var xxx_messageInfo_Verbs proto.InternalMessageInfo -func (m *Verbs) Reset() { *m = Verbs{} } -func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{42} +} +func (m *WatchEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchEvent.Merge(m, src) +} +func (m *WatchEvent) XXX_Size() int { + return m.Size() +} +func (m *WatchEvent) XXX_DiscardUnknown() { + xxx_messageInfo_WatchEvent.DiscardUnknown(m) +} -func (m *WatchEvent) Reset() { *m = WatchEvent{} } -func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{41} } +var xxx_messageInfo_WatchEvent proto.InternalMessageInfo func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -281,7 +1253,7 @@ func init() { proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") - proto.RegisterType((*Fields)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Fields") + proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -289,9 +1261,8 @@ func init() { proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") - proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") - proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") @@ -299,7 +1270,11 @@ func init() { proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") @@ -308,6 +1283,7 @@ func init() { proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions") 
proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") @@ -315,10 +1291,189 @@ func init() { proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) +} + +var fileDescriptor_cf52fa777ced5367 = []byte{ + // 2713 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59, + 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad, + 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4, + 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35, + 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f, + 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e, + 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb, + 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e, + 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e, + 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3, + 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1, + 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff, + 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99, + 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d, + 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e, + 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35, + 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d, + 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92, + 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b, + 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71, + 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb, + 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e, + 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b, + 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30, + 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75, + 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2, + 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38, + 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe, + 0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 
0x0c, + 0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6, + 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad, + 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac, + 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2, + 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f, + 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f, + 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15, + 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8, + 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78, + 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed, + 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60, + 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23, + 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d, + 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea, + 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0, + 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c, + 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e, + 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36, + 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f, + 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b, + 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa, + 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60, + 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63, + 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71, + 0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47, + 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d, + 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c, + 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21, + 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01, + 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32, + 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde, + 0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70, + 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93, + 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e, + 0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63, + 0x2a, 0x0d, 0x57, 
0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c, + 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa, + 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c, + 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1, + 0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9, + 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60, + 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee, + 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd, + 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4, + 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b, + 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9, + 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0, + 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b, + 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62, + 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7, + 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19, + 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e, + 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a, + 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51, + 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1, + 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68, + 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25, + 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e, + 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40, + 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3, + 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd, + 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c, + 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73, + 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7, + 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9, + 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74, + 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24, + 0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6, + 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e, + 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8, + 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8, + 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 
0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56, + 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed, + 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13, + 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49, + 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c, + 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02, + 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f, + 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe, + 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45, + 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d, + 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e, + 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3, + 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea, + 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1, + 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e, + 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8, + 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93, + 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47, + 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c, + 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f, + 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42, + 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c, + 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1, + 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38, + 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42, + 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde, + 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d, + 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6, + 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95, + 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0, + 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75, + 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00, + 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43, + 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4, + 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72, + 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06, + 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 
0x38, 0x2a, 0x88, 0x3f, 0x13, + 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44, + 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77, + 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d, + 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66, + 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd, + 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff, + 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62, + 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd, + 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4, + 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b, + 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c, + 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac, + 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37, + 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6, + 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0, + 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea, + 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4, + 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74, + 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b, + 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86, + 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79, + 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e, + 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6, + 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b, + 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5, + 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11, + 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef, + 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5, + 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d, + 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d, + 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00, +} + func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -326,53 +1481,65 @@ func (m *APIGroup) Marshal() (dAtA []byte, err error) { } func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x1a + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIGroupList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -380,29 +1547,36 @@ func (m *APIGroupList) Marshal() (dAtA []byte, err error) { } func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Groups) > 0 { - for _, msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *APIResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -410,89 +1584,90 @@ func (m *APIResource) Marshal() (dAtA []byte, err error) { } func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Namespaced { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - if m.Verbs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) - n2, err := m.Verbs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.StorageVersionHash) + copy(dAtA[i:], m.StorageVersionHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i-- + dAtA[i] = 0x52 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x4a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x42 + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n2 } + i -= len(m.SingularName) + copy(dAtA[i:], m.SingularName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i-- + dAtA[i] = 0x32 if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) - i += copy(dAtA[i:], m.SingularName) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Verbs != nil { + { + size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) - i += copy(dAtA[i:], m.StorageVersionHash) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i-- + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIResourceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -500,33 +1675,41 @@ func (m *APIResourceList) Marshal() (dAtA []byte, err error) { } func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) if len(m.APIResources) > 0 { - for _, msg := range m.APIResources { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIVersions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -534,44 +1717,45 @@ func (m *APIVersions) Marshal() (dAtA []byte, err error) { } func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Versions[iNdEx]) + copy(dAtA[i:], m.Versions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *CreateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -579,36 +1763,36 @@ func (m *CreateOptions) Marshal() (dAtA []byte, err error) { } func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l 
>= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -616,63 +1800,65 @@ func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { } func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GracePeriodSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) - n3, err := m.Preconditions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i += n3 + } + if m.PropagationPolicy != nil { + i -= len(*m.PropagationPolicy) + copy(dAtA[i:], *m.PropagationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i-- + dAtA[i] = 0x22 } if m.OrphanDependents != nil { - dAtA[i] = 0x18 - i++ + i-- if *m.OrphanDependents { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PropagationPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) - i += copy(dAtA[i:], *m.PropagationPolicy) + i-- + dAtA[i] = 0x18 } - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Preconditions != nil { + { + size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if m.GracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Duration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -680,20 +1866,25 @@ func (m *Duration) Marshal() (dAtA []byte, err error) { } func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ExportOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -701,82 +1892,68 @@ func (m *ExportOptions) Marshal() (dAtA 
[]byte, err error) { } func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Export { + i-- + if m.Exact { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x10 - i++ - if m.Exact { + i-- + if m.Export { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Fields) Marshal() (dAtA []byte, err error) { +func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Fields) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Map) > 0 { - keysForMap := make([]string, 0, len(m.Map)) - for k := range m.Map { - keysForMap = append(keysForMap, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - for _, k := range keysForMap { - dAtA[i] = 0xa - i++ - v := m.Map[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n4, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -784,21 +1961,27 @@ func (m *GetOptions) Marshal() (dAtA []byte, err error) { } func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -806,25 +1989,32 @@ func (m *GroupKind) Marshal() (dAtA []byte, err error) { } func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += 
copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -832,199 +2022,174 @@ func (m *GroupResource) Marshal() (dAtA []byte, err error) { } func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil -} - -func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil -} - -func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { +func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Version) + copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += 
copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Version) + copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Initializer) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + return len(dAtA) - i, nil } -func (m *Initializers) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Pending) > 0 { - for _, msg := range m.Pending { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Result != nil { - 
dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n5, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - return i, nil + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LabelSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1032,51 +2197,60 @@ func (m *LabelSelector) Marshal() (dAtA []byte, err error) { } func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if len(m.MatchLabels) > 0 { keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) for k := range m.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) - for _, k := range keysForMatchLabels { - dAtA[i] = 0xa - i++ - v := m.MatchLabels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.MatchLabels[string(keysForMatchLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(keysForMatchLabels[iNdEx]) + copy(dAtA[i:], keysForMatchLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1084,40 +2258,41 @@ func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { } func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) if len(m.Values) > 0 { - for _, s := range m.Values { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1125,37 +2300,46 @@ func (m *List) Marshal() (dAtA []byte, err error) { } func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1163,29 +2347,42 @@ func (m *ListMeta) Marshal() (dAtA []byte, err error) { } func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x1a - i++ + if m.RemainingItemCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1193,322 +2390,451 @@ func (m *ListOptions) Marshal() (dAtA []byte, err error) { } func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) - i += copy(dAtA[i:], m.LabelSelector) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) - i += copy(dAtA[i:], m.FieldSelector) - dAtA[i] = 0x18 - i++ + i-- + if m.AllowWatchBookmarks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x28 + } + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i-- if m.Watch { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - if m.TimeoutSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + i -= len(m.FieldSelector) + copy(dAtA[i:], m.FieldSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i-- + dAtA[i] = 0x12 + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FieldsV1 != nil { + { + size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.FieldsType) + copy(dAtA[i:], m.FieldsType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType))) + i-- + dAtA[i] = 0x32 + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operation) + 
copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + i -= len(m.Manager) + copy(dAtA[i:], m.Manager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ManagedFields) > 0 { + for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0x7a + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0x72 + } + } + if len(m.OwnerReferences) > 0 { + for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.DeletionGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + i-- + dAtA[i] = 0x50 + } + if m.DeletionTimestamp != nil { + { + size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x38 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) - i += copy(dAtA[i:], m.Manager) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += copy(dAtA[i:], m.Operation) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Time != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n7, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.BlockOwnerDeletion != nil { + i-- + if *m.BlockOwnerDeletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n7 + i-- + dAtA[i] = 0x38 } - if m.Fields != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Fields.Size())) - n8, err := m.Fields.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Controller != nil { + i-- + if *m.Controller { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n8 + i-- + dAtA[i] = 0x30 } - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) 
+ i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) - i += copy(dAtA[i:], m.GenerateName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n9, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if m.DeletionTimestamp != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n10, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.DeletionGracePeriodSeconds != nil { - dAtA[i] = 0x50 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for _, k := range keysForLabels { - dAtA[i] = 0x5a - i++ - v := m.Labels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.Annotations) > 0 { - keysForAnnotations := make([]string, 0, len(m.Annotations)) - for k := range m.Annotations { - keysForAnnotations = append(keysForAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { - dAtA[i] = 0x62 - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.OwnerReferences) > 0 { - for _, msg := range m.OwnerReferences { - dAtA[i] = 0x6a - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0x72 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) - i += copy(dAtA[i:], m.ClusterName) - if m.Initializers != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n11, err := m.Initializers.MarshalTo(dAtA[i:]) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n11 - } - if len(m.ManagedFields) > 0 { - for _, msg := range m.ManagedFields { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *OwnerReference) Marshal() (dAtA []byte, err error) { +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Controller != nil { - dAtA[i] = 0x30 - i++ - if *m.Controller { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ } - if m.BlockOwnerDeletion != nil { - dAtA[i] = 0x38 - i++ - if *m.BlockOwnerDeletion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1516,17 +2842,22 @@ func (m *Patch) Marshal() (dAtA []byte, err error) { } func (m *Patch) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *PatchOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1534,46 +2865,46 @@ func (m *PatchOptions) Marshal() (dAtA []byte, err error) { } func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if m.Force != nil { - dAtA[i] = 0x10 - i++ + i-- if *m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1581,29 +2912,36 @@ func (m *Preconditions) Marshal() (dAtA []byte, err error) { } func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) - } if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ + i -= len(*m.ResourceVersion) + copy(dAtA[i:], *m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) + i-- + dAtA[i] = 0x12 + } + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RootPaths) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1611,32 +2949,31 @@ func (m *RootPaths) Marshal() (dAtA []byte, err error) { } func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, s := range m.Paths { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1644,25 +2981,32 @@ func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { } func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) - i += copy(dAtA[i:], m.ClientCIDR) - dAtA[i] = 0x12 - i++ + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) - i += copy(dAtA[i:], m.ServerAddress) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ClientCIDR) + copy(dAtA[i:], m.ClientCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1670,50 +3014,62 @@ func (m *Status) Marshal() (dAtA []byte, err error) { } func (m *Status) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x30 if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n13, err := m.Details.MarshalTo(dAtA[i:]) + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n13 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1721,29 +3077,37 @@ func (m *StatusCause) Marshal() (dAtA []byte, err error) { } func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ + i -= len(m.Field) + copy(dAtA[i:], m.Field) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) - i += copy(dAtA[i:], m.Field) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1751,48 +3115,87 @@ func (m *StatusDetails) Marshal() (dAtA []byte, err error) { } func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + i-- + dAtA[i] = 0x28 if len(m.Causes) > 0 { - for _, msg := range m.Causes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TableOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IncludeObject) + copy(dAtA[i:], m.IncludeObject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Timestamp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1800,23 +3203,28 @@ func (m *Timestamp) Marshal() (dAtA []byte, err error) { } func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1824,25 +3232,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1850,36 +3265,36 @@ func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { } func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x12 if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], 
m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m Verbs) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1887,32 +3302,31 @@ func (m Verbs) Marshal() (dAtA []byte, err error) { } func (m Verbs) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *WatchEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1920,35 +3334,48 @@ func (m *WatchEvent) Marshal() (dAtA []byte, err error) { } func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *APIGroup) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1971,6 +3398,9 @@ func (m *APIGroup) Size() (n int) { } func (m *APIGroupList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Groups) > 0 { @@ -1983,6 +3413,9 @@ func (m *APIGroupList) Size() (n int) { } func (m *APIResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2018,6 +3451,9 @@ func (m *APIResource) Size() (n int) { } func (m *APIResourceList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2032,6 +3468,9 @@ func (m *APIResourceList) Size() (n int) { } func (m *APIVersions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Versions) > 0 { @@ -2050,6 +3489,9 @@ func (m *APIVersions) Size() (n int) { } func (m *CreateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2064,6 +3506,9 @@ func (m *CreateOptions) Size() (n int) { } func (m *DeleteOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if 
m.GracePeriodSeconds != nil { @@ -2090,6 +3535,9 @@ func (m *DeleteOptions) Size() (n int) { } func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Duration)) @@ -2097,6 +3545,9 @@ func (m *Duration) Size() (n int) { } func (m *ExportOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -2104,22 +3555,23 @@ func (m *ExportOptions) Size() (n int) { return n } -func (m *Fields) Size() (n int) { +func (m *FieldsV1) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Map) > 0 { - for k, v := range m.Map { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *GetOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ResourceVersion) @@ -2128,6 +3580,9 @@ func (m *GetOptions) Size() (n int) { } func (m *GroupKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2138,6 +3593,9 @@ func (m *GroupKind) Size() (n int) { } func (m *GroupResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2148,6 +3606,9 @@ func (m *GroupResource) Size() (n int) { } func (m *GroupVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2158,6 +3619,9 @@ func (m *GroupVersion) Size() (n int) { } func (m *GroupVersionForDiscovery) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2168,6 +3632,9 @@ func (m *GroupVersionForDiscovery) Size() (n int) { } func (m *GroupVersionKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2180,6 +3647,9 @@ func (m *GroupVersionKind) Size() (n int) { } func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2191,31 +3661,10 @@ func (m *GroupVersionResource) Size() (n int) { return n } -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Initializers) Size() (n int) { - var l int - _ = l - if len(m.Pending) > 0 { - for _, e := range m.Pending { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *LabelSelector) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.MatchLabels) > 0 { @@ -2236,6 +3685,9 @@ func (m *LabelSelector) Size() (n int) { } func (m *LabelSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Key) @@ -2252,6 +3704,9 @@ func (m *LabelSelectorRequirement) Size() (n int) { } func (m *List) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2266,6 +3721,9 @@ func (m *List) Size() (n int) { } func (m *ListMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SelfLink) @@ -2274,10 +3732,16 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + if m.RemainingItemCount != nil { + n += 1 + sovGenerated(uint64(*m.RemainingItemCount)) + } return n } func (m *ListOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
len(m.LabelSelector) @@ -2293,10 +3757,14 @@ func (m *ListOptions) Size() (n int) { n += 1 + sovGenerated(uint64(m.Limit)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } func (m *ManagedFieldsEntry) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Manager) @@ -2309,14 +3777,19 @@ func (m *ManagedFieldsEntry) Size() (n int) { l = m.Time.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Fields != nil { - l = m.Fields.Size() + l = len(m.FieldsType) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldsV1 != nil { + l = m.FieldsV1.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *ObjectMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2371,10 +3844,6 @@ func (m *ObjectMeta) Size() (n int) { } l = len(m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) - if m.Initializers != nil { - l = m.Initializers.Size() - n += 2 + l + sovGenerated(uint64(l)) - } if len(m.ManagedFields) > 0 { for _, e := range m.ManagedFields { l = e.Size() @@ -2385,6 +3854,9 @@ func (m *ObjectMeta) Size() (n int) { } func (m *OwnerReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2404,13 +3876,47 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *PartialObjectMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Patch) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *PatchOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2428,6 +3934,9 @@ func (m *PatchOptions) Size() (n int) { } func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.UID != nil { @@ -2442,6 +3951,9 @@ func (m *Preconditions) Size() (n int) { } func (m *RootPaths) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -2454,6 +3966,9 @@ func (m *RootPaths) Size() (n int) { } func (m *ServerAddressByClientCIDR) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ClientCIDR) @@ -2464,6 +3979,9 @@ func (m *ServerAddressByClientCIDR) Size() (n int) { } func (m *Status) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2483,6 +4001,9 @@ func (m *Status) Size() (n int) { } func (m *StatusCause) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2495,6 +4016,9 @@ func (m *StatusCause) Size() (n int) { } func (m *StatusDetails) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2515,7 +4039,21 @@ func (m *StatusDetails) Size() (n int) { return n } +func (m *TableOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Seconds)) @@ -2524,6 +4062,9 @@ func (m *Timestamp) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2534,6 
+4075,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *UpdateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2548,6 +4092,9 @@ func (m *UpdateOptions) Size() (n int) { } func (m Verbs) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -2560,6 +4107,9 @@ func (m Verbs) Size() (n int) { } func (m *WatchEvent) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2570,14 +4120,7 @@ func (m *WatchEvent) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2586,11 +4129,21 @@ func (this *APIGroup) String() string { if this == nil { return "nil" } + repeatedStringForVersions := "[]GroupVersionForDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{" + for _, f := range this.ServerAddressByClientCIDRs { + repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForServerAddressByClientCIDRs += "}" s := strings.Join([]string{`&APIGroup{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`, `}`, }, "") return s @@ -2599,8 +4152,13 @@ func (this *APIGroupList) String() string { if this == nil { return "nil" } + repeatedStringForGroups := "[]APIGroup{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + "," + } + repeatedStringForGroups += "}" s := strings.Join([]string{`&APIGroupList{`, - `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `Groups:` + repeatedStringForGroups + `,`, `}`, }, "") return s @@ -2628,9 +4186,14 @@ func (this *APIResourceList) String() string { if this == nil { return "nil" } + repeatedStringForAPIResources := "[]APIResource{" + for _, f := range this.APIResources { + repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + "," + } + repeatedStringForAPIResources += "}" s := strings.Join([]string{`&APIResourceList{`, `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `APIResources:` + repeatedStringForAPIResources + `,`, `}`, 
}, "") return s @@ -2652,7 +4215,7 @@ func (this *DeleteOptions) String() string { } s := strings.Join([]string{`&DeleteOptions{`, `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, - `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`, `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, @@ -2681,22 +4244,12 @@ func (this *ExportOptions) String() string { }, "") return s } -func (this *Fields) String() string { +func (this *FieldsV1) String() string { if this == nil { return "nil" } - keysForMap := make([]string, 0, len(this.Map)) - for k := range this.Map { - keysForMap = append(keysForMap, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - mapStringForMap := "map[string]Fields{" - for _, k := range keysForMap { - mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) - } - mapStringForMap += "}" - s := strings.Join([]string{`&Fields{`, - `Map:` + mapStringForMap + `,`, + s := strings.Join([]string{`&FieldsV1{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, `}`, }, "") return s @@ -2722,31 +4275,15 @@ func (this *GroupVersionForDiscovery) String() string { }, "") return s } -func (this *Initializer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Initializers) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializers{`, - `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, - `}`, - }, "") - return s -} func (this *LabelSelector) String() string { if this == nil { return "nil" } + repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) for k := range this.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, k) @@ -2759,7 +4296,7 @@ func (this *LabelSelector) String() string { mapStringForMatchLabels += "}" s := strings.Join([]string{`&LabelSelector{`, `MatchLabels:` + mapStringForMatchLabels + `,`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, `}`, }, "") return s @@ -2780,9 +4317,14 @@ func (this *List) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&List{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2795,6 +4337,7 @@ func (this *ListMeta) String() string { `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`, `}`, }, "") return s @@ -2811,6 +4354,7 @@ func (this *ListOptions) String() string { `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `}`, }, "") return s @@ -2824,7 +4368,8 @@ func (this *ManagedFieldsEntry) String() string { `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, - `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Fields", "Fields", 1) + `,`, + `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, + `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`, `}`, }, "") return s @@ -2833,6 +4378,16 @@ func (this *ObjectMeta) String() string { if this == nil { return "nil" } + repeatedStringForOwnerReferences := "[]OwnerReference{" + for _, f := range this.OwnerReferences { + repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForOwnerReferences += "}" + repeatedStringForManagedFields := "[]ManagedFieldsEntry{" + for _, f := range this.ManagedFields { + repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForManagedFields += "}" keysForLabels := make([]string, 0, len(this.Labels)) for k := range this.Labels { keysForLabels = append(keysForLabels, k) @@ -2861,16 +4416,15 @@ func (this *ObjectMeta) String() string { `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`, `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, - `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, - `ManagedFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ManagedFields), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) 
+ `,`, + `ManagedFields:` + repeatedStringForManagedFields + `,`, `}`, }, "") return s @@ -2890,6 +4444,32 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *Patch) String() string { if this == nil { return "nil" @@ -2952,7 +4532,7 @@ func (this *Status) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `}`, }, "") @@ -2974,17 +4554,32 @@ func (this *StatusDetails) String() string { if this == nil { return "nil" } + repeatedStringForCauses := "[]StatusCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" s := strings.Join([]string{`&StatusDetails{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `Causes:` + repeatedStringForCauses + `,`, `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s } +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + `}`, + }, "") + return s +} func (this *Timestamp) String() string { if this == nil { return "nil" @@ -3024,7 +4619,7 @@ func (this *WatchEvent) String() string { } s := strings.Join([]string{`&WatchEvent{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3052,7 +4647,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3080,7 +4675,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3090,6 +4685,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3109,7 +4707,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +4716,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3140,7 +4741,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3149,6 +4750,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3170,7 +4774,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3179,6 +4783,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3196,6 +4803,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3223,7 +4833,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3251,7 +4861,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3260,6 +4870,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3277,6 +4890,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3304,7 +4920,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3332,7 +4948,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3342,6 +4958,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3361,7 +4980,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) 
<< shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3381,7 +5000,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3391,6 +5010,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3410,7 +5032,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3419,6 +5041,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3443,7 +5068,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3453,6 +5078,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3472,7 +5100,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3482,6 +5110,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3501,7 +5132,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3511,6 +5142,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3530,7 +5164,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3540,6 +5174,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3559,7 +5196,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3569,6 +5206,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3588,7 +5228,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3598,6 +5238,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3612,6 +5255,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3639,7 +5285,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3667,7 +5313,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3677,6 +5323,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3696,7 +5345,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3705,6 +5354,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3722,6 +5374,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3749,7 +5404,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3777,7 +5432,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3787,6 +5442,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3806,7 +5464,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3815,6 +5473,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3832,6 +5493,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3859,7 +5523,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3887,7 +5551,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3897,6 +5561,9 @@ 
func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3916,7 +5583,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3926,6 +5593,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3940,6 +5610,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3967,7 +5640,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3995,7 +5668,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4015,7 +5688,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4024,6 +5697,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4048,7 +5724,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4069,7 +5745,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4079,6 +5755,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4099,7 +5778,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4109,6 +5788,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4123,6 +5805,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4150,7 +5835,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4178,7 +5863,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Duration |= (time.Duration(b) & 0x7F) << shift + m.Duration |= time.Duration(b&0x7F) << shift if b < 0x80 { break } @@ 
-4192,6 +5877,9 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4219,7 +5907,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4240,212 +5928,39 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Export = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Exact = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Fields) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Fields: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Map == nil { - m.Map = make(map[string]Fields) - } - var mapkey string - mapvalue := &Fields{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - 
postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Fields{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Map[mapkey] = *mapvalue - iNdEx = postIndex + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4455,6 +5970,9 @@ func (m *Fields) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4467,7 +5985,7 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetOptions) Unmarshal(dAtA []byte) error { +func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4482,7 +6000,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4490,17 +6008,17 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4510,20 +6028,25 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -4534,6 +6057,9 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4546,7 +6072,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupKind) Unmarshal(dAtA []byte) error { +func (m *GetOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4561,7 +6087,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4569,15 +6095,15 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4589,7 +6115,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4599,39 +6125,13 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4642,6 +6142,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4654,7 +6157,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupResource) Unmarshal(dAtA []byte) error { +func (m *GroupKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ 
-4669,7 +6172,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4677,10 +6180,10 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4697,7 +6200,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4707,6 +6210,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4714,7 +6220,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4726,7 +6232,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4736,10 +6242,13 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4750,6 +6259,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4762,7 +6274,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersion) Unmarshal(dAtA []byte) error { +func (m *GroupResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4777,7 +6289,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4785,10 +6297,10 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4805,7 +6317,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -4815,6 +6327,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4822,7 +6337,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4834,7 +6349,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4844,10 +6359,13 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4858,6 +6376,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4870,7 +6391,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { +func (m *GroupVersion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4885,7 +6406,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4893,15 +6414,15 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4913,7 +6434,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4923,10 +6444,13 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.GroupVersion = string(dAtA[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4942,7 +6466,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -4952,6 +6476,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4966,6 +6493,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4978,7 +6508,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { +func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4993,7 +6523,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5001,15 +6531,15 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5021,7 +6551,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5031,10 +6561,13 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(dAtA[iNdEx:postIndex]) + m.GroupVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -5050,7 +6583,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5060,39 +6593,13 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Version = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5103,6 +6610,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5115,7 +6625,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5130,7 +6640,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5138,10 +6648,10 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -5158,7 +6668,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5168,6 +6678,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5187,7 +6700,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5197,6 +6710,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5204,7 +6720,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5216,7 +6732,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5226,10 +6742,13 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5240,6 +6759,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5252,7 +6774,7 @@ func (m 
*GroupVersionResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Initializer) Unmarshal(dAtA []byte) error { +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5267,7 +6789,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5275,15 +6797,15 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5295,7 +6817,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5305,66 +6827,19 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Initializers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5374,28 +6849,29 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex 
> l { return io.ErrUnexpectedEOF } - m.Pending = append(m.Pending, Initializer{}) - if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5405,24 +6881,23 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5433,6 +6908,9 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5460,7 +6938,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5488,7 +6966,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5497,6 +6975,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5517,7 +6998,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5534,7 +7015,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5544,6 +7025,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -5560,7 +7044,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5570,6 +7054,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -5606,7 +7093,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5615,6 +7102,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5632,6 +7122,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5659,7 +7152,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5687,7 +7180,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5697,6 +7190,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5716,7 +7212,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5726,6 +7222,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5745,7 +7244,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5755,6 +7254,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5769,6 +7271,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5796,7 +7301,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5824,7 +7329,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5833,6 +7338,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5854,7 +7362,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5863,10 +7371,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, 
k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -5880,6 +7391,9 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5907,7 +7421,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5935,7 +7449,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5945,6 +7459,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5964,7 +7481,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5974,6 +7491,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5993,7 +7513,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6003,11 +7523,34 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemainingItemCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6017,6 +7560,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6044,7 +7590,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6072,7 +7618,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6082,6 +7628,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6101,7 +7650,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) 
<< shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6111,6 +7660,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6130,7 +7682,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6150,7 +7702,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6160,6 +7712,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6179,7 +7734,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6199,7 +7754,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift + m.Limit |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6218,7 +7773,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6228,11 +7783,34 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowWatchBookmarks = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6242,6 +7820,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6269,7 +7850,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6297,7 +7878,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6307,6 +7888,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6326,7 +7910,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6336,6 +7920,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6355,7 +7942,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6365,6 +7952,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6384,7 +7974,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6393,6 +7983,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6403,9 +7996,41 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldsType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6417,7 +8042,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6426,13 +8051,16 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Fields == nil { - m.Fields = &Fields{} + if m.FieldsV1 == nil { + m.FieldsV1 = &FieldsV1{} } - if err := m.Fields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6445,6 +8073,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6472,7 +8103,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6500,7 +8131,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -6510,6 +8141,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6529,7 +8163,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6539,6 +8173,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6558,7 +8195,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6568,6 +8205,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6587,7 +8227,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6597,6 +8237,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6616,7 +8259,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6626,6 +8269,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6645,7 +8291,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6655,6 +8301,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6674,7 +8323,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift + m.Generation |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6693,7 +8342,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6702,6 +8351,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6723,7 +8375,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6732,6 +8384,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6756,7 +8411,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6776,7 +8431,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6785,6 +8440,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6805,7 +8463,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6822,7 +8480,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6832,6 +8490,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6848,7 +8509,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6858,6 +8519,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6894,7 +8558,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6903,6 +8567,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6923,7 +8590,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6940,7 +8607,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6950,6 +8617,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6966,7 +8636,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6976,6 +8646,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if 
postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6996,11 +8669,109 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Annotations[mapkey] = mapvalue + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7012,7 +8783,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7021,17 +8792,73 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) + if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7043,7 +8870,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7053,14 +8880,17 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7072,7 +8902,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7082,16 +8912,19 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterName = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 16: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7101,30 +8934,29 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Initializers == nil { - 
m.Initializers = &Initializers{} - } - if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 17: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7134,23 +8966,66 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) - if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7160,6 +9035,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7172,7 +9050,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } return nil } -func (m *OwnerReference) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7187,7 +9065,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7195,17 +9073,17 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7215,55 +9093,83 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7273,26 +9179,30 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7302,63 +9212,26 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Controller = &b - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.Items = append(m.Items, PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - b := bool(v != 0) - m.BlockOwnerDeletion = &b + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7368,6 +9241,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7395,7 +9271,7 @@ func (m *Patch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7418,6 +9294,9 @@ func (m *Patch) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7445,7 +9324,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7473,7 +9352,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7483,6 +9362,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7502,7 +9384,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7523,7 +9405,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << 
shift if b < 0x80 { break } @@ -7533,6 +9415,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7547,6 +9432,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7574,7 +9462,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7602,7 +9490,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7612,6 +9500,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7632,7 +9523,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7642,6 +9533,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7657,6 +9551,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7684,7 +9581,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7712,7 +9609,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7722,6 +9619,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7736,6 +9636,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7763,7 +9666,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7791,7 +9694,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7801,6 +9704,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7820,7 +9726,7 @@ func (m *ServerAddressByClientCIDR) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7830,6 +9736,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7844,6 +9753,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7871,7 +9783,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7899,7 +9811,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7908,6 +9820,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7929,7 +9844,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7939,6 +9854,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7958,7 +9876,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7968,6 +9886,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7987,7 +9908,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7997,6 +9918,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8016,7 +9940,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8025,6 +9949,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8049,7 +9976,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= (int32(b) & 0x7F) << shift + m.Code |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8063,6 +9990,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8090,7 +10020,7 @@ func (m 
*StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8118,7 +10048,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8128,6 +10058,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8147,7 +10080,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8157,6 +10090,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8176,7 +10112,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8186,6 +10122,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8200,6 +10139,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8227,7 +10169,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8255,7 +10197,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8265,6 +10207,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8284,7 +10229,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8294,6 +10239,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8313,7 +10261,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8323,6 +10271,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8342,7 +10293,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -8351,6 +10302,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8373,7 +10327,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + m.RetryAfterSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8392,7 +10346,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8402,6 +10356,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8416,6 +10373,94 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8443,7 +10488,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8471,7 +10516,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Seconds |= (int64(b) & 0x7F) << shift + m.Seconds |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -8490,7 +10535,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Nanos |= (int32(b) & 0x7F) << shift + m.Nanos 
|= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8504,6 +10549,9 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8531,7 +10579,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8559,7 +10607,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8569,6 +10617,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8588,7 +10639,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8598,6 +10649,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8612,6 +10666,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8639,7 +10696,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8667,7 +10724,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8677,6 +10734,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8696,7 +10756,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8706,6 +10766,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8720,6 +10783,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8747,7 +10813,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8775,7 +10841,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8785,6 +10851,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8799,6 +10868,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8826,7 +10898,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8854,7 +10926,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8864,6 +10936,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8883,7 +10958,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8892,6 +10967,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8908,6 +10986,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8974,10 +11055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -9006,6 +11090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -9024,179 +11111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2674 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0x4d, 0x6c, 0x23, 0x57, - 0x39, 0x63, 0xc7, 0x8e, 0xfd, 0x39, 0xce, 0xcf, 0xeb, 0x16, 0x5c, 0x4b, 0xc4, 0xe9, 0x14, 0x55, - 0x29, 0x6c, 0x6d, 0x92, 0xd2, 0x6a, 0x59, 0x68, 0x21, 0x8e, 0x93, 0x6d, 0xe8, 0xa6, 0x89, 0x5e, - 0xba, 0x8b, 0x58, 0x56, 0x88, 0x89, 0xe7, 0xc5, 0x19, 0x62, 0xcf, 0x4c, 0xdf, 0x1b, 0x67, 0x37, - 0x70, 0xa0, 0x07, 0x10, 0x20, 0x41, 0xb5, 0x47, 0x4e, 0xa8, 0x2b, 0xb8, 0x70, 0xe5, 0xc4, 0x89, - 0x53, 0x25, 0xf6, 0x58, 0x89, 0x4b, 0x0f, 0xc8, 0xea, 0x06, 0x24, 0xb8, 0x71, 0xcf, 0x01, 0xa1, - 0xf7, 0x33, 0x33, 0x6f, 0xec, 0x78, 0x33, 0x66, 0x0b, 0xe2, 0x14, 0xcf, 0xf7, 0xff, 0xbe, 0xef, - 0x7b, 0xdf, 0xf7, 0xbd, 0x2f, 0xb0, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0xaf, 0x71, 0xdc, 0x3f, 0x20, - 0xd4, 0x25, 0x01, 0x61, 0x8d, 0x13, 0xe2, 0xda, 0x1e, 0x6d, 0x28, 0x84, 0xe5, 0x3b, 0x3d, 0xab, - 0x7d, 0xe4, 0xb8, 0x84, 0x9e, 0x36, 0xfc, 0xe3, 0x0e, 0x07, 0xb0, 0x46, 0x8f, 0x04, 
0x56, 0xe3, - 0x64, 0xb5, 0xd1, 0x21, 0x2e, 0xa1, 0x56, 0x40, 0xec, 0xba, 0x4f, 0xbd, 0xc0, 0x43, 0x9f, 0x97, - 0x5c, 0x75, 0x9d, 0xab, 0xee, 0x1f, 0x77, 0x38, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0x64, 0xb5, 0xfa, - 0x72, 0xc7, 0x09, 0x8e, 0xfa, 0x07, 0xf5, 0xb6, 0xd7, 0x6b, 0x74, 0xbc, 0x8e, 0xd7, 0x10, 0xcc, - 0x07, 0xfd, 0x43, 0xf1, 0x25, 0x3e, 0xc4, 0x2f, 0x29, 0xb4, 0x3a, 0xd6, 0x14, 0xda, 0x77, 0x03, - 0xa7, 0x47, 0x86, 0xad, 0xa8, 0xbe, 0x76, 0x19, 0x03, 0x6b, 0x1f, 0x91, 0x9e, 0x35, 0xcc, 0x67, - 0xfe, 0x29, 0x0b, 0x85, 0xf5, 0xbd, 0xed, 0x1b, 0xd4, 0xeb, 0xfb, 0x68, 0x19, 0xa6, 0x5d, 0xab, - 0x47, 0x2a, 0xc6, 0xb2, 0xb1, 0x52, 0x6c, 0xce, 0x3e, 0x1a, 0xd4, 0xa6, 0xce, 0x06, 0xb5, 0xe9, - 0xb7, 0xad, 0x1e, 0xc1, 0x02, 0x83, 0xba, 0x50, 0x38, 0x21, 0x94, 0x39, 0x9e, 0xcb, 0x2a, 0x99, - 0xe5, 0xec, 0x4a, 0x69, 0xed, 0x8d, 0x7a, 0x9a, 0xf3, 0xd7, 0x85, 0x82, 0xdb, 0x92, 0x75, 0xcb, - 0xa3, 0x2d, 0x87, 0xb5, 0xbd, 0x13, 0x42, 0x4f, 0x9b, 0x0b, 0x4a, 0x4b, 0x41, 0x21, 0x19, 0x8e, - 0x34, 0xa0, 0x1f, 0x1b, 0xb0, 0xe0, 0x53, 0x72, 0x48, 0x28, 0x25, 0xb6, 0xc2, 0x57, 0xb2, 0xcb, - 0xc6, 0xa7, 0xa0, 0xb6, 0xa2, 0xd4, 0x2e, 0xec, 0x0d, 0xc9, 0xc7, 0x23, 0x1a, 0xd1, 0x6f, 0x0c, - 0xa8, 0x32, 0x42, 0x4f, 0x08, 0x5d, 0xb7, 0x6d, 0x4a, 0x18, 0x6b, 0x9e, 0x6e, 0x74, 0x1d, 0xe2, - 0x06, 0x1b, 0xdb, 0x2d, 0xcc, 0x2a, 0xd3, 0xc2, 0x0f, 0x5f, 0x4f, 0x67, 0xd0, 0xfe, 0x38, 0x39, - 0x4d, 0x53, 0x59, 0x54, 0x1d, 0x4b, 0xc2, 0xf0, 0x13, 0xcc, 0x30, 0x0f, 0x61, 0x36, 0x0c, 0xe4, - 0x4d, 0x87, 0x05, 0xe8, 0x36, 0xe4, 0x3b, 0xfc, 0x83, 0x55, 0x0c, 0x61, 0x60, 0x3d, 0x9d, 0x81, - 0xa1, 0x8c, 0xe6, 0x9c, 0xb2, 0x27, 0x2f, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0x4f, 0x43, 0x69, - 0x7d, 0x6f, 0x1b, 0x13, 0xe6, 0xf5, 0x69, 0x9b, 0xa4, 0x48, 0x9a, 0x35, 0x00, 0xfe, 0x97, 0xf9, - 0x56, 0x9b, 0xd8, 0x95, 0xcc, 0xb2, 0xb1, 0x52, 0x68, 0x22, 0x45, 0x07, 0x6f, 0x47, 0x18, 0xac, - 0x51, 0x71, 0xa9, 0xc7, 0x8e, 0x6b, 0x8b, 0x68, 0x6b, 0x52, 0xdf, 0x72, 0x5c, 0x1b, 0x0b, 0x0c, - 0xba, 0x09, 0xb9, 0x13, 0x42, 0x0f, 0xb8, 0xff, 0x79, 0x42, 0x7c, 0x31, 0xdd, 0xf1, 0x6e, 0x73, - 0x96, 0x66, 0xf1, 0x6c, 0x50, 0xcb, 0x89, 0x9f, 0x58, 0x0a, 0x41, 0x75, 0x00, 0x76, 0xe4, 0xd1, - 0x40, 0x98, 0x53, 0xc9, 0x2d, 0x67, 0x57, 0x8a, 0xcd, 0x39, 0x6e, 0xdf, 0x7e, 0x04, 0xc5, 0x1a, - 0x05, 0xba, 0x06, 0xb3, 0xcc, 0x71, 0x3b, 0xfd, 0xae, 0x45, 0x39, 0xa0, 0x92, 0x17, 0x76, 0x5e, - 0x51, 0x76, 0xce, 0xee, 0x6b, 0x38, 0x9c, 0xa0, 0xe4, 0x9a, 0xda, 0x56, 0x40, 0x3a, 0x1e, 0x75, - 0x08, 0xab, 0xcc, 0xc4, 0x9a, 0x36, 0x22, 0x28, 0xd6, 0x28, 0xd0, 0x0b, 0x90, 0x13, 0x9e, 0xaf, - 0x14, 0x84, 0x8a, 0xb2, 0x52, 0x91, 0x13, 0x61, 0xc1, 0x12, 0x87, 0x5e, 0x82, 0x19, 0x75, 0x6b, - 0x2a, 0x45, 0x41, 0x36, 0xaf, 0xc8, 0x66, 0xc2, 0xb4, 0x0e, 0xf1, 0xe8, 0x9b, 0x80, 0x58, 0xe0, - 0x51, 0xab, 0x43, 0x14, 0xea, 0x4d, 0x8b, 0x1d, 0x55, 0x40, 0x70, 0x55, 0x15, 0x17, 0xda, 0x1f, - 0xa1, 0xc0, 0x17, 0x70, 0x99, 0xbf, 0x37, 0x60, 0x5e, 0xcb, 0x05, 0x91, 0x77, 0xd7, 0x60, 0xb6, - 0xa3, 0xdd, 0x3a, 0x95, 0x17, 0x91, 0x67, 0xf4, 0x1b, 0x89, 0x13, 0x94, 0x88, 0x40, 0x91, 0x2a, - 0x49, 0x61, 0x75, 0x59, 0x4d, 0x9d, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, 0x0d, 0xc8, 0x70, 0x2c, 0xd9, - 0xfc, 0xbb, 0x21, 0x12, 0x38, 0xac, 0x37, 0x68, 0x45, 0xab, 0x69, 0x86, 0x08, 0xc7, 0xec, 0x98, - 0x7a, 0x74, 0x49, 0x21, 0xc8, 0xfc, 0x5f, 0x14, 0x82, 0xeb, 0x85, 0x5f, 0x7d, 0x50, 0x9b, 0x7a, - 0xef, 0x2f, 0xcb, 0x53, 0x66, 0x0f, 0xca, 0x1b, 0x94, 0x58, 0x01, 0xd9, 0xf5, 0x03, 0x71, 0x00, - 0x13, 0xf2, 0x36, 0x3d, 0xc5, 0x7d, 0x57, 0x1d, 0x14, 0xf8, 0xfd, 0x6e, 0x09, 0x08, 0x56, 0x18, - 0x1e, 0xbf, 
0x43, 0x87, 0x74, 0xed, 0x1d, 0xcb, 0xb5, 0x3a, 0x84, 0xaa, 0x1b, 0x18, 0x79, 0x75, - 0x4b, 0xc3, 0xe1, 0x04, 0xa5, 0xf9, 0xd3, 0x2c, 0x94, 0x5b, 0xa4, 0x4b, 0x62, 0x7d, 0x5b, 0x80, - 0x3a, 0xd4, 0x6a, 0x93, 0x3d, 0x42, 0x1d, 0xcf, 0xde, 0x27, 0x6d, 0xcf, 0xb5, 0x99, 0xc8, 0x88, - 0x6c, 0xf3, 0x33, 0x3c, 0xcf, 0x6e, 0x8c, 0x60, 0xf1, 0x05, 0x1c, 0xa8, 0x0b, 0x65, 0x9f, 0x8a, - 0xdf, 0x4e, 0xa0, 0x7a, 0x0f, 0xbf, 0xf3, 0xaf, 0xa4, 0x73, 0xf5, 0x9e, 0xce, 0xda, 0x5c, 0x3c, - 0x1b, 0xd4, 0xca, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x8f, 0xfa, 0x47, 0x96, 0xdb, - 0x22, 0x3e, 0x71, 0x6d, 0xe2, 0x06, 0x4c, 0x78, 0xa1, 0xd0, 0xbc, 0xc2, 0x3b, 0xc6, 0xee, 0x10, - 0x0e, 0x8f, 0x50, 0xa3, 0x3b, 0xb0, 0xe8, 0x53, 0xcf, 0xb7, 0x3a, 0x16, 0x97, 0xb8, 0xe7, 0x75, - 0x9d, 0xf6, 0xa9, 0xa8, 0x53, 0xc5, 0xe6, 0xd5, 0xb3, 0x41, 0x6d, 0x71, 0x6f, 0x18, 0x79, 0x3e, - 0xa8, 0x3d, 0x23, 0x5c, 0xc7, 0x21, 0x31, 0x12, 0x8f, 0x8a, 0xd1, 0x62, 0x98, 0x1b, 0x17, 0x43, - 0x73, 0x1b, 0x0a, 0xad, 0x3e, 0x15, 0x5c, 0xe8, 0x75, 0x28, 0xd8, 0xea, 0xb7, 0xf2, 0xfc, 0xf3, - 0x61, 0xcb, 0x0d, 0x69, 0xce, 0x07, 0xb5, 0x32, 0x1f, 0x12, 0xea, 0x21, 0x00, 0x47, 0x2c, 0xe6, - 0x5d, 0x28, 0x6f, 0xde, 0xf7, 0x3d, 0x1a, 0x84, 0x31, 0x7d, 0x11, 0xf2, 0x44, 0x00, 0x84, 0xb4, - 0x42, 0xdc, 0x27, 0x24, 0x19, 0x56, 0x58, 0x5e, 0xb7, 0xc8, 0x7d, 0xab, 0x1d, 0xa8, 0x82, 0x1f, - 0xd5, 0xad, 0x4d, 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x68, 0x40, 0x5e, 0x64, 0x14, 0x43, 0xef, 0x40, - 0xb6, 0x67, 0xf9, 0xaa, 0x59, 0xbd, 0x9a, 0x2e, 0xb2, 0x92, 0xb5, 0xbe, 0x63, 0xf9, 0x9b, 0x6e, - 0x40, 0x4f, 0x9b, 0x25, 0xa5, 0x24, 0xbb, 0x63, 0xf9, 0x98, 0x8b, 0xab, 0xda, 0x50, 0x08, 0xb1, - 0x68, 0x01, 0xb2, 0xc7, 0xe4, 0x54, 0x16, 0x24, 0xcc, 0x7f, 0xa2, 0x26, 0xe4, 0x4e, 0xac, 0x6e, - 0x9f, 0xa8, 0x7c, 0xba, 0x3a, 0x89, 0x56, 0x2c, 0x59, 0xaf, 0x67, 0xae, 0x19, 0xe6, 0x2e, 0xc0, - 0x0d, 0x12, 0x79, 0x68, 0x1d, 0xe6, 0xc3, 0x6a, 0x93, 0x2c, 0x82, 0x9f, 0x55, 0xe6, 0xcd, 0xe3, - 0x24, 0x1a, 0x0f, 0xd3, 0x9b, 0x77, 0xa1, 0x28, 0x0a, 0x25, 0xef, 0x77, 0x71, 0x07, 0x30, 0x9e, - 0xd0, 0x01, 0xc2, 0x86, 0x99, 0x19, 0xd7, 0x30, 0xb5, 0xba, 0xd0, 0x85, 0xb2, 0xe4, 0x0d, 0x7b, - 0x78, 0x2a, 0x0d, 0x57, 0xa1, 0x10, 0x9a, 0xa9, 0xb4, 0x44, 0xb3, 0x5b, 0x28, 0x08, 0x47, 0x14, - 0x9a, 0xb6, 0x23, 0x48, 0x14, 0xfd, 0x74, 0xca, 0xb4, 0x86, 0x96, 0x79, 0x72, 0x43, 0xd3, 0x34, - 0xfd, 0x08, 0x2a, 0xe3, 0x06, 0xbe, 0xa7, 0x68, 0x4b, 0xe9, 0x4d, 0x31, 0xdf, 0x37, 0x60, 0x41, - 0x97, 0x94, 0x3e, 0x7c, 0xe9, 0x95, 0x5c, 0x3e, 0x1a, 0x69, 0x1e, 0xf9, 0xb5, 0x01, 0x57, 0x12, - 0x47, 0x9b, 0x28, 0xe2, 0x13, 0x18, 0xa5, 0x27, 0x47, 0x76, 0x82, 0xe4, 0x68, 0x40, 0x69, 0xdb, - 0x75, 0x02, 0xc7, 0xea, 0x3a, 0x3f, 0x20, 0xf4, 0xf2, 0x61, 0xd2, 0xfc, 0xa3, 0x01, 0xb3, 0x1a, - 0x07, 0x43, 0x77, 0x61, 0x86, 0xd7, 0x5d, 0xc7, 0xed, 0xa8, 0xda, 0x91, 0x72, 0x66, 0xd0, 0x84, - 0xc4, 0xe7, 0xda, 0x93, 0x92, 0x70, 0x28, 0x12, 0xed, 0x41, 0x9e, 0x12, 0xd6, 0xef, 0x06, 0x93, - 0x95, 0x88, 0xfd, 0xc0, 0x0a, 0xfa, 0x4c, 0xd6, 0x66, 0x2c, 0xf8, 0xb1, 0x92, 0x63, 0xfe, 0x39, - 0x03, 0xe5, 0x9b, 0xd6, 0x01, 0xe9, 0xee, 0x93, 0x2e, 0x69, 0x07, 0x1e, 0x45, 0x3f, 0x84, 0x52, - 0xcf, 0x0a, 0xda, 0x47, 0x02, 0x1a, 0x8e, 0xeb, 0xad, 0x74, 0x8a, 0x12, 0x92, 0xea, 0x3b, 0xb1, - 0x18, 0x59, 0x10, 0x9f, 0x51, 0x07, 0x2b, 0x69, 0x18, 0xac, 0x6b, 0x13, 0x6f, 0x2c, 0xf1, 0xbd, - 0x79, 0xdf, 0xe7, 0xb3, 0xc4, 0xe4, 0x4f, 0xbb, 0x84, 0x09, 0x98, 0xbc, 0xdb, 0x77, 0x28, 0xe9, - 0x11, 0x37, 0x88, 0xdf, 0x58, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0xea, 0x1b, 0xb0, 0x30, 0x6c, - 0xfc, 0x05, 0xf5, 0xfa, 0x8a, 0x5e, 
0xaf, 0x8b, 0x7a, 0x05, 0xfe, 0xad, 0x01, 0x95, 0x71, 0x86, - 0xa0, 0xcf, 0x69, 0x82, 0xe2, 0x1e, 0xf1, 0x16, 0x39, 0x95, 0x52, 0x37, 0xa1, 0xe0, 0xf9, 0xfc, - 0x55, 0xec, 0x51, 0x95, 0xe7, 0x2f, 0x85, 0xb9, 0xbb, 0xab, 0xe0, 0xe7, 0x83, 0xda, 0xb3, 0x09, - 0xf1, 0x21, 0x02, 0x47, 0xac, 0xbc, 0x31, 0x0b, 0x7b, 0xf8, 0xb0, 0x10, 0x35, 0xe6, 0xdb, 0x02, - 0x82, 0x15, 0xc6, 0xfc, 0x83, 0x01, 0xd3, 0x62, 0x4a, 0xbe, 0x0b, 0x05, 0xee, 0x3f, 0xdb, 0x0a, - 0x2c, 0x61, 0x57, 0xea, 0xf7, 0x19, 0xe7, 0xde, 0x21, 0x81, 0x15, 0xdf, 0xaf, 0x10, 0x82, 0x23, - 0x89, 0x08, 0x43, 0xce, 0x09, 0x48, 0x2f, 0x0c, 0xe4, 0xcb, 0x63, 0x45, 0xab, 0xed, 0x40, 0x1d, - 0x5b, 0xf7, 0x36, 0xef, 0x07, 0xc4, 0xe5, 0xc1, 0x88, 0x8b, 0xc1, 0x36, 0x97, 0x81, 0xa5, 0x28, - 0xf3, 0x77, 0x06, 0x44, 0xaa, 0xf8, 0x75, 0x67, 0xa4, 0x7b, 0x78, 0xd3, 0x71, 0x8f, 0x95, 0x5b, - 0x23, 0x73, 0xf6, 0x15, 0x1c, 0x47, 0x14, 0x17, 0x35, 0xc4, 0xcc, 0x64, 0x0d, 0x91, 0x2b, 0x6c, - 0x7b, 0x6e, 0xe0, 0xb8, 0xfd, 0x91, 0xfa, 0xb2, 0xa1, 0xe0, 0x38, 0xa2, 0x30, 0xff, 0x95, 0x81, - 0x12, 0xb7, 0x35, 0xec, 0xc8, 0x5f, 0x85, 0x72, 0x57, 0x8f, 0x9e, 0xb2, 0xf9, 0x59, 0x25, 0x22, - 0x79, 0x1f, 0x71, 0x92, 0x96, 0x33, 0x8b, 0x31, 0x37, 0x62, 0xce, 0x24, 0x99, 0xb7, 0x74, 0x24, - 0x4e, 0xd2, 0xf2, 0x3a, 0x7b, 0x8f, 0xe7, 0xb5, 0x1a, 0x20, 0x23, 0xd7, 0x7e, 0x8b, 0x03, 0xb1, - 0xc4, 0x5d, 0xe4, 0x9f, 0xe9, 0x09, 0xfd, 0x73, 0x1d, 0xe6, 0x78, 0x20, 0xbd, 0x7e, 0x10, 0x4e, - 0xd9, 0x39, 0x31, 0xeb, 0xa1, 0xb3, 0x41, 0x6d, 0xee, 0x9d, 0x04, 0x06, 0x0f, 0x51, 0x72, 0x1b, - 0xbb, 0x4e, 0xcf, 0x09, 0x2a, 0x33, 0x82, 0x25, 0xb2, 0xf1, 0x26, 0x07, 0x62, 0x89, 0x4b, 0x04, - 0xa0, 0x70, 0x69, 0x00, 0xfe, 0x91, 0x01, 0x24, 0x9f, 0x05, 0xb6, 0x9c, 0x96, 0xe4, 0x8d, 0x7e, - 0x09, 0x66, 0x7a, 0xea, 0x59, 0x61, 0x24, 0x1b, 0x4a, 0xf8, 0xa2, 0x08, 0xf1, 0x68, 0x07, 0x8a, - 0xf2, 0x66, 0xc5, 0xd9, 0xd2, 0x50, 0xc4, 0xc5, 0xdd, 0x10, 0x71, 0x3e, 0xa8, 0x55, 0x13, 0x6a, - 0x22, 0xcc, 0x3b, 0xa7, 0x3e, 0xc1, 0xb1, 0x04, 0xb4, 0x06, 0x60, 0xf9, 0x8e, 0xbe, 0x43, 0x2a, - 0xc6, 0x3b, 0x88, 0xf8, 0x35, 0x88, 0x35, 0x2a, 0xf4, 0x26, 0x4c, 0x73, 0x4f, 0xa9, 0x05, 0xc3, - 0x17, 0xd2, 0xdd, 0x4f, 0xee, 0xeb, 0x66, 0x81, 0x37, 0x2d, 0xfe, 0x0b, 0x0b, 0x09, 0xe8, 0x0e, - 0xe4, 0x45, 0x5a, 0xc8, 0xa8, 0x4c, 0x38, 0x68, 0x8a, 0x57, 0x87, 0x9a, 0x92, 0xcf, 0xa3, 0x5f, - 0x58, 0x49, 0x34, 0xdf, 0x85, 0xe2, 0x8e, 0xd3, 0xa6, 0x1e, 0x57, 0xc7, 0x1d, 0xcc, 0x12, 0xaf, - 0xac, 0xc8, 0xc1, 0x61, 0xf0, 0x43, 0x3c, 0x8f, 0xba, 0x6b, 0xb9, 0x9e, 0x7c, 0x4b, 0xe5, 0xe2, - 0xa8, 0xbf, 0xcd, 0x81, 0x58, 0xe2, 0xae, 0x5f, 0xe1, 0x8d, 0xfa, 0x67, 0x0f, 0x6b, 0x53, 0x0f, - 0x1e, 0xd6, 0xa6, 0x3e, 0x78, 0xa8, 0x9a, 0xf6, 0xdf, 0x4a, 0x00, 0xbb, 0x07, 0xdf, 0x27, 0x6d, - 0x59, 0x0c, 0x2e, 0xdf, 0x00, 0xf1, 0xe1, 0x4b, 0x2d, 0x1e, 0xc5, 0xb6, 0x24, 0x33, 0x34, 0x7c, - 0x69, 0x38, 0x9c, 0xa0, 0x44, 0x0d, 0x28, 0x46, 0x5b, 0x21, 0x15, 0xb6, 0xc5, 0x30, 0x0d, 0xa2, - 0xd5, 0x11, 0x8e, 0x69, 0x12, 0x95, 0x69, 0xfa, 0xd2, 0xca, 0xd4, 0x84, 0x6c, 0xdf, 0xb1, 0x45, - 0x54, 0x8a, 0xcd, 0x2f, 0x85, 0x9d, 0xe1, 0xd6, 0x76, 0xeb, 0x7c, 0x50, 0x7b, 0x7e, 0xdc, 0x4a, - 0x35, 0x38, 0xf5, 0x09, 0xab, 0xdf, 0xda, 0x6e, 0x61, 0xce, 0x7c, 0xd1, 0xed, 0xcd, 0x4f, 0x78, - 0x7b, 0xd7, 0x00, 0xd4, 0xa9, 0x39, 0xb7, 0xbc, 0x86, 0x51, 0x76, 0xde, 0x88, 0x30, 0x58, 0xa3, - 0x42, 0x0c, 0x16, 0xdb, 0xfc, 0x71, 0xcf, 0x93, 0xdd, 0xe9, 0x11, 0x16, 0x58, 0x3d, 0xb9, 0x23, - 0x9a, 0x2c, 0x55, 0x9f, 0x53, 0x6a, 0x16, 0x37, 0x86, 0x85, 0xe1, 0x51, 0xf9, 0xc8, 0x83, 0x45, - 0x5b, 0x3d, 0x53, 0x63, 0xa5, 0xc5, 0x89, 0x95, 0x3e, 0xcb, 
0x15, 0xb6, 0x86, 0x05, 0xe1, 0x51, - 0xd9, 0xe8, 0xbb, 0x50, 0x0d, 0x81, 0xa3, 0xbb, 0x02, 0xb1, 0xb5, 0xca, 0x36, 0x97, 0xce, 0x06, - 0xb5, 0x6a, 0x6b, 0x2c, 0x15, 0x7e, 0x82, 0x04, 0x64, 0x43, 0xbe, 0x2b, 0xc7, 0xae, 0x92, 0x68, - 0x95, 0x5f, 0x4b, 0x77, 0x8a, 0x38, 0xfb, 0xeb, 0xfa, 0xb8, 0x15, 0xbd, 0x85, 0xd5, 0xa4, 0xa5, - 0x64, 0xa3, 0xfb, 0x50, 0xb2, 0x5c, 0xd7, 0x0b, 0x2c, 0xb9, 0xbd, 0x98, 0x15, 0xaa, 0xd6, 0x27, - 0x56, 0xb5, 0x1e, 0xcb, 0x18, 0x1a, 0xef, 0x34, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, 0x30, 0xef, 0xdd, - 0x73, 0x09, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb6, 0x09, 0xab, 0x94, 0x85, 0xf6, 0x2f, 0xa7, 0xd4, - 0x9e, 0x60, 0x8e, 0x53, 0x3a, 0x09, 0x67, 0x78, 0x58, 0x0b, 0xaa, 0x03, 0x1c, 0x3a, 0xae, 0x1a, - 0xd2, 0x2b, 0x73, 0xf1, 0x9a, 0x73, 0x2b, 0x82, 0x62, 0x8d, 0x02, 0xbd, 0x0a, 0xa5, 0x76, 0xb7, - 0xcf, 0x02, 0x22, 0xf7, 0xa9, 0xf3, 0xe2, 0x06, 0x45, 0xe7, 0xdb, 0x88, 0x51, 0x58, 0xa7, 0x43, - 0x47, 0x30, 0xeb, 0x68, 0xaf, 0x81, 0xca, 0x82, 0xc8, 0xc5, 0xb5, 0x89, 0x9f, 0x00, 0xac, 0xb9, - 0xc0, 0x2b, 0x91, 0x0e, 0xc1, 0x09, 0xc9, 0xa8, 0x0f, 0xe5, 0x9e, 0xde, 0x6a, 0x2a, 0x8b, 0xc2, - 0x8f, 0xd7, 0xd2, 0xa9, 0x1a, 0x6d, 0x86, 0xf1, 0x00, 0x91, 0xc0, 0xe1, 0xa4, 0x96, 0xea, 0x57, - 0xa0, 0xf4, 0x1f, 0xce, 0xc4, 0x7c, 0xa6, 0x1e, 0xce, 0x98, 0x89, 0x66, 0xea, 0x0f, 0x33, 0x30, - 0x97, 0x8c, 0x73, 0xf4, 0xf6, 0x34, 0xc6, 0xae, 0xe5, 0xc3, 0x66, 0x90, 0x1d, 0xdb, 0x0c, 0x54, - 0xcd, 0x9d, 0x7e, 0x9a, 0x9a, 0x9b, 0x6c, 0xe7, 0xb9, 0x54, 0xed, 0xbc, 0x0e, 0xc0, 0xe7, 0x13, - 0xea, 0x75, 0xbb, 0x84, 0x8a, 0x12, 0x5d, 0x50, 0x8b, 0xf7, 0x08, 0x8a, 0x35, 0x0a, 0xb4, 0x05, - 0xe8, 0xa0, 0xeb, 0xb5, 0x8f, 0x85, 0x0b, 0xc2, 0xf2, 0x22, 0x8a, 0x73, 0x41, 0x2e, 0x2f, 0x9b, - 0x23, 0x58, 0x7c, 0x01, 0x87, 0x39, 0x03, 0xb9, 0x3d, 0x3e, 0xe6, 0x99, 0xbf, 0x34, 0x60, 0x56, - 0xfc, 0x9a, 0x64, 0x1d, 0x5b, 0x83, 0xdc, 0xa1, 0x17, 0xae, 0x5c, 0x0a, 0xf2, 0x3f, 0x17, 0x5b, - 0x1c, 0x80, 0x25, 0xfc, 0x29, 0xf6, 0xb5, 0xef, 0x1b, 0x90, 0x5c, 0x84, 0xa2, 0x37, 0x64, 0x68, - 0x8c, 0x68, 0x53, 0x39, 0x61, 0x58, 0x5e, 0x1f, 0x37, 0xe8, 0x3f, 0x93, 0x6a, 0xeb, 0x75, 0x15, - 0x8a, 0xd8, 0xf3, 0x82, 0x3d, 0x2b, 0x38, 0x62, 0xfc, 0xe0, 0x3e, 0xff, 0xa1, 0x7c, 0x23, 0x0e, - 0x2e, 0x30, 0x58, 0xc2, 0xcd, 0x5f, 0x18, 0xf0, 0xdc, 0xd8, 0x15, 0x39, 0xcf, 0x90, 0x76, 0xf4, - 0xa5, 0x4e, 0x14, 0x65, 0x48, 0x4c, 0x87, 0x35, 0x2a, 0x3e, 0xe9, 0x27, 0xf6, 0xea, 0xc3, 0x93, - 0x7e, 0x42, 0x1b, 0x4e, 0xd2, 0x9a, 0xff, 0xcc, 0x40, 0x5e, 0x3e, 0xfb, 0xff, 0xcb, 0x8f, 0xbb, - 0x17, 0x21, 0xcf, 0x84, 0x1e, 0x65, 0x5e, 0xd4, 0x74, 0xa4, 0x76, 0xac, 0xb0, 0x62, 0xd8, 0x26, - 0x8c, 0x59, 0x9d, 0xf0, 0x32, 0xc6, 0xc3, 0xb6, 0x04, 0xe3, 0x10, 0x8f, 0x5e, 0x83, 0x3c, 0x25, - 0x16, 0x8b, 0xde, 0x1d, 0x4b, 0xa1, 0x48, 0x2c, 0xa0, 0xe7, 0x83, 0xda, 0xac, 0x12, 0x2e, 0xbe, - 0xb1, 0xa2, 0x46, 0x77, 0x60, 0xc6, 0x26, 0x81, 0xe5, 0x74, 0xc3, 0xc1, 0xf6, 0x95, 0x49, 0xd6, - 0x23, 0x2d, 0xc9, 0xda, 0x2c, 0x71, 0x9b, 0xd4, 0x07, 0x0e, 0x05, 0xf2, 0x42, 0xd2, 0xf6, 0x6c, - 0xf9, 0x9f, 0xb5, 0x5c, 0x5c, 0x48, 0x36, 0x3c, 0x9b, 0x60, 0x81, 0x31, 0x1f, 0x18, 0x50, 0x92, - 0x92, 0x36, 0xac, 0x3e, 0x23, 0x68, 0x35, 0x3a, 0x85, 0x0c, 0x77, 0x38, 0xda, 0x4c, 0xf3, 0xc7, - 0xc0, 0xf9, 0xa0, 0x56, 0x14, 0x64, 0xe2, 0x65, 0x10, 0x1e, 0x40, 0xf3, 0x51, 0xe6, 0x12, 0x1f, - 0xbd, 0x00, 0x39, 0x71, 0x7b, 0x94, 0x33, 0xa3, 0x79, 0x59, 0x5c, 0x30, 0x2c, 0x71, 0xe6, 0x27, - 0x19, 0x28, 0x27, 0x0e, 0x97, 0x62, 0x38, 0x8e, 0x56, 0x71, 0x99, 0x14, 0xeb, 0xdd, 0xf1, 0xff, - 0x0f, 0xfd, 0x36, 0xe4, 0xdb, 0xfc, 0x7c, 0xe1, 0x3f, 0xa4, 0x57, 0x27, 0x09, 0x85, 
0xf0, 0x4c, - 0x9c, 0x49, 0xe2, 0x93, 0x61, 0x25, 0x10, 0xdd, 0x80, 0x45, 0x4a, 0x02, 0x7a, 0xba, 0x7e, 0x18, - 0x10, 0xaa, 0xbf, 0x2f, 0x73, 0xf1, 0xf8, 0x88, 0x87, 0x09, 0xf0, 0x28, 0x4f, 0x58, 0xfa, 0xf3, - 0x4f, 0x51, 0xfa, 0xcd, 0x2e, 0x4c, 0xff, 0x0f, 0x9f, 0x3a, 0xdf, 0x81, 0x62, 0x3c, 0x8c, 0x7e, - 0xca, 0x2a, 0xcd, 0xef, 0x41, 0x81, 0x67, 0x63, 0xf8, 0x88, 0xba, 0xa4, 0xb3, 0x26, 0x7b, 0x5e, - 0x26, 0x4d, 0xcf, 0x33, 0x7b, 0x50, 0xbe, 0xe5, 0xdb, 0x4f, 0xf9, 0x1f, 0xc0, 0x4c, 0xea, 0x8e, - 0xb2, 0x06, 0xf2, 0xbf, 0xea, 0xbc, 0x78, 0xcb, 0x05, 0x94, 0x56, 0xbc, 0xf5, 0x6d, 0x92, 0xb6, - 0x01, 0xfe, 0x89, 0x01, 0x20, 0xb6, 0x21, 0x9b, 0x27, 0xc4, 0x0d, 0xb8, 0x1f, 0x78, 0xc0, 0x87, - 0xfd, 0x20, 0x6e, 0xad, 0xc0, 0xa0, 0x5b, 0x90, 0xf7, 0xc4, 0x4c, 0xac, 0x56, 0xb2, 0x13, 0x6e, - 0xb7, 0xa2, 0x24, 0x97, 0x83, 0x35, 0x56, 0xc2, 0x9a, 0x2b, 0x8f, 0x1e, 0x2f, 0x4d, 0x7d, 0xf4, - 0x78, 0x69, 0xea, 0xe3, 0xc7, 0x4b, 0x53, 0xef, 0x9d, 0x2d, 0x19, 0x8f, 0xce, 0x96, 0x8c, 0x8f, - 0xce, 0x96, 0x8c, 0x8f, 0xcf, 0x96, 0x8c, 0x4f, 0xce, 0x96, 0x8c, 0x07, 0x7f, 0x5d, 0x9a, 0xba, - 0x93, 0x39, 0x59, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x66, 0x55, 0x2c, 0x41, 0x24, - 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 36bda1fe..605505e1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -212,21 +212,20 @@ message ExportOptions { optional bool exact = 2; } -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -message Fields { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:', where is the name of a field in a struct, or key in a map - // 'v:', where is the exact json formatted value of a list item - // 'i:', where is position of a item in a list - // 'k:', where is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - map map = 1; +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:', where is the name of a field in a struct, or key in a map +// 'v:', where is the exact json formatted value of a list item +// 'i:', where is position of a item in a list +// 'k:', where is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +message FieldsV1 { + // Raw is the underlying serialization of this object. + optional bytes Raw = 1; } // GetOptions is the standard query options to the standard REST get call. 
@@ -302,27 +301,6 @@ message GroupVersionResource { optional string resource = 3; } -// Initializer is information about an initializer that has not yet completed. -message Initializer { - // name of the process that is responsible for initializing this object. - optional string name = 1; -} - -// Initializers tracks the progress of initialization. -message Initializers { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - repeated Initializer pending = 1; - - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - optional Status result = 2; -} - // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. @@ -361,7 +339,7 @@ message LabelSelectorRequirement { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; @@ -375,6 +353,10 @@ message ListMeta { // selfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 1; @@ -383,7 +365,7 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; @@ -395,6 +377,18 @@ message ListMeta { // identical to the value in the first response, unless you have received this token from an error // message. optional string continue = 3; + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + optional int64 remainingItemCount = 4; } // ListOptions is the query options to a standard REST list call. 
@@ -414,6 +408,20 @@ message ListOptions { // +optional optional bool watch = 3; + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // + // This field is beta. + // + // +optional + optional bool allowWatchBookmarks = 9; + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -483,9 +491,13 @@ message ManagedFieldsEntry { // +optional optional Time time = 4; - // Fields identifies a set of fields. + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + optional string fieldsType = 6; + + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - optional Fields fields = 5; + optional FieldsV1 fieldsV1 = 7; } // MicroTime is version of Time with microsecond level precision. @@ -532,7 +544,7 @@ message ObjectMeta { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional optional string generateName = 2; @@ -550,6 +562,10 @@ message ObjectMeta { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 4; @@ -572,7 +588,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -588,7 +604,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time creationTimestamp = 8; @@ -609,7 +625,7 @@ message ObjectMeta { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time deletionTimestamp = 9; @@ -643,19 +659,6 @@ message ObjectMeta { // +patchStrategy=merge repeated OwnerReference ownerReferences = 13; - // An initializer is a controller which enforces some system invariant at object creation time. 
- // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. - optional Initializers initializers = 16; - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries @@ -678,8 +681,6 @@ message ObjectMeta { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional repeated ManagedFieldsEntry managedFields = 17; } @@ -692,7 +693,7 @@ message OwnerReference { optional string apiVersion = 5; // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. @@ -717,6 +718,28 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // items contains each of the included items. + repeated PartialObjectMetadata items = 2; +} + // Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. message Patch { } @@ -780,13 +803,13 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional string status = 2; @@ -857,7 +880,7 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 3; @@ -879,6 +902,16 @@ message StatusDetails { optional int32 retryAfterSeconds = 5; } +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + optional string includeObject = 1; +} + // Time is a wrapper around time.Time which supports correct // marshaling to YAML and JSON. Wrappers are provided for many // of the factory methods that the time package offers. @@ -925,14 +958,14 @@ message TypeMeta { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional optional string apiVersion = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index b4dc78b3..ad989ad7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -17,7 +17,9 @@ limitations under the License. package v1 import ( + "bytes" "encoding/json" + "errors" "fmt" "k8s.io/apimachinery/pkg/fields" @@ -250,18 +252,31 @@ func ResetObjectMetaForStatus(meta, existingMeta Object) { meta.SetAnnotations(existingMeta.GetAnnotations()) meta.SetFinalizers(existingMeta.GetFinalizers()) meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) - meta.SetManagedFields(existingMeta.GetManagedFields()) + // managedFields must be preserved since it's been modified to + // track changed fields in the status update. + //meta.SetManagedFields(existingMeta.GetManagedFields()) } // MarshalJSON implements json.Marshaler -func (f Fields) MarshalJSON() ([]byte, error) { - return json.Marshal(&f.Map) +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. 
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (f FieldsV1) MarshalJSON() ([]byte, error) { + if f.Raw == nil { + return []byte("null"), nil + } + return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler -func (f *Fields) UnmarshalJSON(b []byte) error { - return json.Unmarshal(b, &f.Map) +func (f *FieldsV1) UnmarshalJSON(b []byte) error { + if f == nil { + return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(b, []byte("null")) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil } -var _ json.Marshaler = Fields{} -var _ json.Unmarshaler = &Fields{} +var _ json.Marshaler = FieldsV1{} +var _ json.Unmarshaler = &FieldsV1{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 05f07adf..912cf203 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -55,8 +55,6 @@ type Object interface { SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) - GetInitializers() *Initializers - SetInitializers(initializers *Initializers) GetFinalizers() []string SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference @@ -94,6 +92,8 @@ type ListInterface interface { SetSelfLink(selfLink string) GetContinue() string SetContinue(c string) + GetRemainingItemCount() *int64 + SetRemainingItemCount(c *int64) } // Type exposes the type and APIVersion of versioned or internal API objects. @@ -105,12 +105,16 @@ type Type interface { SetKind(kind string) } +var _ ListInterface = &ListMeta{} + func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } func (meta *ListMeta) GetContinue() string { return meta.Continue } func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } +func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount } +func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } @@ -160,8 +164,6 @@ func (meta *ObjectMeta) GetLabels() map[string]string { return m func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } -func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 6f6c5111..cdd9a6a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -41,11 +41,6 @@ func (t *MicroTime) 
DeepCopyInto(out *MicroTime) { *out = *t } -// String returns the representation of the time. -func (t MicroTime) String() string { - return t.Time.String() -} - // NewMicroTime returns a wrapped instance of the provided time func NewMicroTime(time time.Time) MicroTime { return MicroTime{time} @@ -72,22 +67,40 @@ func (t *MicroTime) IsZero() bool { // Before reports whether the time instant t is before u. func (t *MicroTime) Before(u *MicroTime) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. func (t *MicroTime) Equal(u *MicroTime) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // BeforeTime reports whether the time instant t is before second-lever precision u. func (t *MicroTime) BeforeTime(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // EqualTime reports whether the time instant t is equal to second-lever precision u. func (t *MicroTime) EqualTime(u *Time) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // UnixMicro returns the local time corresponding to the given Unix time diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go index 14841be5..6dd6d899 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -70,3 +70,11 @@ func (m *MicroTime) MarshalTo(data []byte) (int, error) { } return m.ProtoMicroTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf marshalling interface. +func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 76d042a9..368efe1e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -94,6 +94,23 @@ func init() { &PatchOptions{}, ) + if err := AddMetaToScheme(scheme); err != nil { + panic(err) + } + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. utilruntime.Must(RegisterDefaults(scheme)) } + +func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + return scheme.AddConversionFuncs( + Convert_Slice_string_To_v1_IncludeObjectPolicy, + ) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index efff656e..fe510ed9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,7 +20,7 @@ import ( "encoding/json" "time" - "github.com/google/gofuzz" + fuzz "github.com/google/gofuzz" ) // Time is a wrapper around time.Time which supports correct @@ -41,11 +41,6 @@ func (t *Time) DeepCopyInto(out *Time) { *out = *t } -// String returns the representation of the time. 
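// Illustrative sketch (Go, not part of the vendored diff): the MicroTime
// comparison helpers above are now nil-safe, so callers no longer need their
// own nil guards before Equal/Before. Timestamps below are arbitrary.
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	a := metav1.NewMicroTime(time.Date(2022, 1, 14, 20, 5, 30, 0, time.UTC))
	b := metav1.NewMicroTime(a.Add(time.Second))

	var unset *metav1.MicroTime // e.g. a deletion timestamp that was never set

	fmt.Println(a.Before(&b))     // true
	fmt.Println(a.Equal(unset))   // false: only nil == nil compares equal
	fmt.Println(unset.Equal(nil)) // true, and no longer panics on a nil receiver
}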
-func (t Time) String() string { - return t.Time.String() -} - // NewTime returns a wrapped instance of the provided time func NewTime(time time.Time) Time { return Time{time} @@ -72,7 +67,10 @@ func (t *Time) IsZero() bool { // Before reports whether the time instant t is before u. func (t *Time) Before(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. @@ -147,8 +145,12 @@ func (t Time) MarshalJSON() ([]byte, error) { // Encode unset/nil objects as JSON's "null". return []byte("null"), nil } - - return json.Marshal(t.UTC().Format(time.RFC3339)) + buf := make([]byte, 0, len(time.RFC3339)+2) + buf = append(buf, '"') + // time cannot contain non escapable JSON characters + buf = t.UTC().AppendFormat(buf, time.RFC3339) + buf = append(buf, '"') + return buf, nil } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index ed72186b..eac8d965 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -75,7 +75,7 @@ func (m *Time) Unmarshal(data []byte) error { return nil } -// Marshal implements the protobuf marshalling interface. +// Marshal implements the protobuf marshaling interface. func (m *Time) Marshal() (data []byte, err error) { if m == nil || m.Time.IsZero() { return nil, nil @@ -83,10 +83,18 @@ func (m *Time) Marshal() (data []byte, err error) { return m.ProtoTime().Marshal() } -// MarshalTo implements the protobuf marshalling interface. +// MarshalTo implements the protobuf marshaling interface. func (m *Time) MarshalTo(data []byte) (int, error) { if m == nil || m.Time.IsZero() { return 0, nil } return m.ProtoTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. +func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index fd639525..76b27558 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -43,14 +43,14 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -61,6 +61,10 @@ type ListMeta struct { // selfLink is a URL representing this object. // Populated by the system. // Read-only. 
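// Illustrative sketch (Go, not part of the vendored diff): metav1.Time now
// marshals by appending a quoted RFC3339 string directly instead of going
// through json.Marshal on a formatted string; the output observable to
// callers is unchanged. The timestamp below is arbitrary.
package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	t := metav1.NewTime(time.Date(2022, 1, 14, 12, 5, 30, 0, time.UTC))

	b, err := json.Marshal(t) // `"2022-01-14T12:05:30Z"`; a zero Time yields `null`
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	var out metav1.Time
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Equal(&t)) // true
}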
+ // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` @@ -69,7 +73,7 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` @@ -81,6 +85,18 @@ type ListMeta struct { // identical to the value in the first response, unless you have received this token from an error // message. Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` } // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here @@ -115,7 +131,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` @@ -133,6 +149,10 @@ type ObjectMeta struct { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` @@ -155,7 +175,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -171,7 +191,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. 
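// Illustrative sketch (Go, not part of the vendored diff): the widened
// ListInterface lets generic code read the server's *estimate* of items left
// in a chunked list without knowing the concrete list type. The count and
// continue token below are made up.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func reportRemaining(list metav1.ListInterface) {
	if rc := list.GetRemainingItemCount(); rc != nil {
		fmt.Printf("server estimates %d more items after this chunk\n", *rc)
		return
	}
	// nil means the list is complete, or selectors made the count unknown.
	fmt.Println("no remaining-item estimate available")
}

func main() {
	remaining := int64(42)
	meta := &metav1.ListMeta{Continue: "opaque-continue-token"}
	meta.SetRemainingItemCount(&remaining)
	reportRemaining(meta)
}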
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` @@ -192,7 +212,7 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` @@ -226,19 +246,6 @@ type ObjectMeta struct { // +patchStrategy=merge OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. - Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries @@ -261,32 +268,10 @@ type ObjectMeta struct { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } -// Initializers tracks the progress of initialization. -type Initializers struct { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` -} - -// Initializer is information about an initializer that has not yet completed. -type Initializer struct { - // name of the process that is responsible for initializing this object. 
- Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients NamespaceDefault string = "default" @@ -307,7 +292,7 @@ type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -349,6 +334,20 @@ type ListOptions struct { // add, update, and remove notifications. Specify resourceVersion. // +optional Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // + // This field is beta. + // + // +optional + AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -586,13 +585,13 @@ type Preconditions struct { type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. @@ -631,7 +630,7 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. @@ -763,11 +762,13 @@ const ( // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the // data was invalid. API calls that return BadRequest can never succeed. 
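// Illustrative sketch (Go, not part of the vendored diff): requesting BOOKMARK
// events via the new allowWatchBookmarks field and treating them as progress
// markers only. Assumes the Bookmark event type from the same apimachinery
// version's watch package; the resourceVersion is a placeholder.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	opts := metav1.ListOptions{
		Watch:               true,
		AllowWatchBookmarks: true, // servers may ignore this; clients must not rely on it
		ResourceVersion:     "12345",
	}
	fmt.Printf("watch options: %+v\n", opts)

	handle := func(ev watch.Event) {
		switch ev.Type {
		case watch.Bookmark:
			// Only the resourceVersion in the bookmark object's metadata is meaningful.
			fmt.Println("bookmark received")
		case watch.Added, watch.Modified, watch.Deleted:
			fmt.Println("object event:", ev.Type)
		}
	}
	handle(watch.Event{Type: watch.Bookmark})
}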
+ // Status code 400 StatusReasonBadRequest StatusReason = "BadRequest" // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the // resource was not supported by the code - for instance, attempting to delete a resource that // can only be created. API calls that return MethodNotAllowed can never succeed. + // Status code 405 StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable @@ -866,7 +867,7 @@ const ( type List struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1099,9 +1100,16 @@ type ManagedFieldsEntry struct { // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' // +optional Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` - // Fields identifies a set of fields. + + // Fields is tombstoned to show why 5 is a reserved protobuf tag. + //Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"` + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` } // ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. @@ -1112,19 +1120,179 @@ const ( ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" ) -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -type Fields struct { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:', where is the name of a field in a struct, or key in a map - // 'v:', where is the exact json formatted value of a list item - // 'i:', where is position of a item in a list - // 'k:', where is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - Map map[string]Fields `json:",inline" protobuf:"bytes,1,rep,name=map"` +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. 
The string will follow one of these four formats: +// 'f:', where is the name of a field in a struct, or key in a map +// 'v:', where is the exact json formatted value of a list item +// 'i:', where is position of a item in a list +// 'k:', where is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +type FieldsV1 struct { + // Raw is the underlying serialization of this object. + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"` +} + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. This can be introduced +// in a v1 because clients a) receive an error if they try to access proto today, and b) +// once introduced they would be able to gracefully switch over to using it. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false +type Table struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column, such as number, integer, string, or + // array. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type modifier for this column. A format modifies the type and + // imposes additional rules, like date or time formatting for a string. The 'name' format is applied + // to the primary identifier column which has type 'string' to assist in clients identifying column + // is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or + // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a + // more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. 
These conditions + // apply to the row, not to the object, and will be specific to table output. The only defined + // condition type is 'Completed', for a row that indicates a resource that has run to completion and + // can be given less visual priority. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // The media type of the object will always match the enclosing list - if this as a JSON table, these + // will be JSON encoded objects. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. The only defined value is 'Completed' indicating that the + // object this row represents has reached a completed state and may be given less visual + // priority than other rows. Clients are not required to honor any conditions but should + // be consistent where possible about handling the conditions. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions struct { + TypeMeta `json:",inline"` + + // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions + // and may be removed as a field in a future release. 
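// Illustrative sketch (Go, not part of the vendored diff): the three
// includeObject policies a caller can set on TableOptions, and a
// PartialObjectMetadata value standing in for the default "Metadata" row
// payload. The object name and namespace below are placeholders.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	for _, policy := range []metav1.IncludeObjectPolicy{
		metav1.IncludeNone,     // rows carry no object payload
		metav1.IncludeMetadata, // rows carry PartialObjectMetadata (the default)
		metav1.IncludeObject,   // rows carry the full object
	} {
		fmt.Printf("table options: %+v\n", metav1.TableOptions{IncludeObject: policy})
	}

	// What a "Metadata" row payload looks like once decoded.
	partial := metav1.PartialObjectMetadata{
		TypeMeta:   metav1.TypeMeta{Kind: "PartialObjectMetadata", APIVersion: "meta.k8s.io/v1beta1"},
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	fmt.Println(partial.Name, partial.Namespace)
}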
+ NoHeaders bool `json:"-"` + + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata struct { + TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items contains each of the included items. + Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 3b1a09e5..07e6cc12 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -119,12 +119,12 @@ func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } -var map_Fields = map[string]string{ - "": "Fields stores a set of fields in a data structure like a Trie. To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff", +var map_FieldsV1 = map[string]string{ + "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } -func (Fields) SwaggerDoc() map[string]string { - return map_Fields +func (FieldsV1) SwaggerDoc() map[string]string { + return map_FieldsV1 } var map_GetOptions = map[string]string{ @@ -146,25 +146,6 @@ func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { return map_GroupVersionForDiscovery } -var map_Initializer = map[string]string{ - "": "Initializer is information about an initializer that has not yet completed.", - "name": "name of the process that is responsible for initializing this object.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_Initializers = map[string]string{ - "": "Initializers tracks the progress of initialization.", - "pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", - "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", -} - -func (Initializers) SwaggerDoc() map[string]string { - return map_Initializers -} - var map_LabelSelector = map[string]string{ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", @@ -188,7 +169,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of objects", } @@ -197,10 +178,11 @@ func (List) SwaggerDoc() map[string]string { } var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -208,14 +190,15 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -228,7 +211,8 @@ var map_ManagedFieldsEntry = map[string]string{ "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", - "fields": "Fields identifies a set of fields.", + "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", } func (ManagedFieldsEntry) SwaggerDoc() map[string]string { @@ -238,22 +222,21 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -263,7 +246,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "controller": "If true, this reference points to the managing controller.", @@ -274,6 +257,25 @@ func (OwnerReference) SwaggerDoc() map[string]string { return map_OwnerReference } +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + var map_Patch = map[string]string{ "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", } @@ -324,8 +326,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -351,7 +353,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. 
Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", @@ -361,10 +363,66 @@ func (StatusDetails) SwaggerDoc() map[string]string { return map_StatusDetails } +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. 
Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 75ac693f..7ea0986f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -32,6 +32,9 @@ import ( // NestedFieldCopy returns a deep copy of the value of a nested field. // Returns false if the value is missing. // No error is returned for a nil field. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { val, found, err := NestedFieldNoCopy(obj, fields...) if !found || err != nil { @@ -43,6 +46,9 @@ func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, // NestedFieldNoCopy returns a reference to a nested field. // Returns false if value is not found and an error if unable // to traverse obj. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj @@ -275,6 +281,22 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } +func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return 0 + } + return val +} + +func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return nil + } + return &val +} + func jsonPath(fields []string) string { return "." + strings.Join(fields, ".") } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 1eaa8580..d1903394 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -47,6 +47,7 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} +var _ metav1.ListInterface = &Unstructured{} func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } @@ -126,6 +127,16 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error { return err } +// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. 
+// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (in *Unstructured) NewEmptyInstance() runtime.Unstructured { + out := new(Unstructured) + if in != nil { + out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind()) + } + return out +} + func (in *Unstructured) DeepCopy() *Unstructured { if in == nil { return nil @@ -319,6 +330,18 @@ func (u *Unstructured) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } +func (u *Unstructured) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *Unstructured) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + func (u *Unstructured) GetCreationTimestamp() metav1.Time { var timestamp metav1.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) @@ -408,31 +431,6 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } -func (u *Unstructured) GetInitializers() *metav1.Initializers { - m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers") - if !found || err != nil { - return nil - } - out := &metav1.Initializers{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - return nil - } - return out -} - -func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { - if initializers == nil { - RemoveNestedField(u.Object, "metadata", "initializers") - return - } - out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - } - u.setNestedField(out, "metadata", "initializers") -} - func (u *Unstructured) GetFinalizers() []string { val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") return val diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index bf3fd023..5028f5fb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,6 +52,16 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } +// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. +// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { + out := new(UnstructuredList) + if u != nil { + out.SetGroupVersionKind(u.GroupVersionKind()) + } + return out +} + // UnstructuredContent returns a map contain an overlay of the Items field onto // the Object field. Items always overwrites overlay. 
func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { @@ -166,6 +176,18 @@ func (u *UnstructuredList) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } +func (u *UnstructuredList) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *UnstructuredList) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { u.SetAPIVersion(gvk.GroupVersion().String()) u.SetKind(gvk.Kind) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 68498b8d..b82fdf20 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -313,24 +313,22 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Fields) DeepCopyInto(out *Fields) { +func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in - if in.Map != nil { - in, out := &in.Map, &out.Map - *out = make(map[string]Fields, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fields. -func (in *Fields) DeepCopy() *Fields { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1. +func (in *FieldsV1) DeepCopy() *FieldsV1 { if in == nil { return nil } - out := new(Fields) + out := new(FieldsV1) in.DeepCopyInto(out) return out } @@ -456,48 +454,6 @@ func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. -func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializers) DeepCopyInto(out *Initializers) { - *out = *in - if in.Pending != nil { - in, out := &in.Pending, &out.Pending - *out = make([]Initializer, len(*in)) - copy(*out, *in) - } - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(Status) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. -func (in *Initializers) DeepCopy() *Initializers { - if in == nil { - return nil - } - out := new(Initializers) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { *out = *in @@ -572,7 +528,7 @@ func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -604,6 +560,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListMeta) DeepCopyInto(out *ListMeta) { *out = *in + if in.RemainingItemCount != nil { + in, out := &in.RemainingItemCount, &out.RemainingItemCount + *out = new(int64) + **out = **in + } return } @@ -654,9 +615,9 @@ func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { in, out := &in.Time, &out.Time *out = (*in).DeepCopy() } - if in.Fields != nil { - in, out := &in.Fields, &out.Fields - *out = new(Fields) + if in.FieldsV1 != nil { + in, out := &in.FieldsV1, &out.FieldsV1 + *out = new(FieldsV1) (*in).DeepCopyInto(*out) } return @@ -716,11 +677,6 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = new(Initializers) - (*in).DeepCopyInto(*out) - } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) @@ -772,6 +728,65 @@ func (in *OwnerReference) DeepCopy() *OwnerReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. +func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Patch) DeepCopyInto(out *Patch) { *out = *in @@ -890,7 +905,7 @@ func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { func (in *Status) DeepCopyInto(out *Status) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Details != nil { in, out := &in.Details, &out.Details *out = new(StatusDetails) @@ -954,6 +969,108 @@ func (in *StatusDetails) DeepCopy() *StatusDetails { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. +func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. +func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. func (in *Time) DeepCopy() *Time { if in == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index 3b2bedd9..2b7e8ca0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -15,30 +15,3 @@ limitations under the License. */ package v1beta1 - -import "k8s.io/apimachinery/pkg/runtime" - -func (in *TableRow) DeepCopy() *TableRow { - if in == nil { - return nil - } - - out := new(TableRow) - - if in.Cells != nil { - out.Cells = make([]interface{}, len(in.Cells)) - for i := range in.Cells { - out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) - } - } - - if in.Conditions != nil { - out.Conditions = make([]TableRowCondition, len(in.Conditions)) - for i := range in.Conditions { - in.Conditions[i].DeepCopyInto(&out.Conditions[i]) - } - } - - in.Object.DeepCopyInto(&out.Object) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index 47c03d69..5fae30ae 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -17,27 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto - - It has these top-level messages: - PartialObjectMetadata - PartialObjectMetadataList - TableOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,55 +44,71 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_90ec10f86b91f9a8, []int{0} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) } -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func init() { - proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") - proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") } -func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) } -func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil +var fileDescriptor_90ec10f86b91f9a8 = []byte{ + // 321 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, + 0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, + 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6, + 0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e, + 0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x79, 0xfe, 0xe1, 0xf7, 0xcb, 0x3f, 0x89, 0x3f, 0x8e, + 0x4f, 0x2c, 0x93, 0x9a, 0xc7, 0x59, 0x00, 0x26, 0x01, 0x04, 0xcb, 0x67, 0x90, 0x4c, 0xb4, 0xe1, + 0xe5, 0x86, 0x48, 0xa5, 0x12, 0xe1, 0x54, 0x26, 0x60, 0x1e, 0x79, 0x1a, 0x47, 0x2e, 0xb0, 0x5c, + 0x01, 0x0a, 0x3e, 0x1b, 0x04, 0x80, 0x62, 0xc0, 0x23, 0x48, 0xc0, 0x08, 0x84, 0x09, 0x4b, 0x8d, + 0x46, 0xdd, 0x3c, 0xdc, 0xa0, 0xec, 0x2b, 0xca, 0xd2, 0x38, 0x72, 0x81, 0x65, 0x0e, 0x65, 0x25, + 0xda, 0xee, 0x47, 0x12, 
0xa7, 0x59, 0xc0, 0x42, 0xad, 0x78, 0xa4, 0x23, 0xcd, 0x0b, 0x43, 0x90, + 0xdd, 0x15, 0x53, 0x31, 0x14, 0xab, 0x8d, 0xb9, 0x7d, 0x54, 0xa5, 0xd4, 0x76, 0x9f, 0xf6, 0xaf, + 0x57, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf8, 0x2f, 0xc0, 0x86, 0x53, 0x50, 0x62, 0x9b, + 0x3b, 0x78, 0x21, 0xfe, 0xfe, 0x95, 0x30, 0x28, 0xc5, 0xc3, 0x65, 0x70, 0x0f, 0x21, 0x5e, 0x00, + 0x8a, 0x89, 0x40, 0x71, 0x2e, 0x2d, 0x36, 0x6f, 0xfc, 0xba, 0x2a, 0xe7, 0xd6, 0xbf, 0x2e, 0xe9, + 0x35, 0x86, 0x8c, 0x55, 0x79, 0x29, 0xe6, 0x68, 0x67, 0x1a, 0xed, 0xcd, 0x57, 0x1d, 0x2f, 0x5f, + 0x75, 0xea, 0x1f, 0xc9, 0xf8, 0xd3, 0xd8, 0xbc, 0xf5, 0x6b, 0x12, 0x41, 0xd9, 0x16, 0xe9, 0xfe, + 0xef, 0x35, 0x86, 0xa7, 0xd5, 0xd4, 0x3f, 0xb6, 0x1d, 0xed, 0x96, 0xe7, 0xd4, 0xce, 0x9c, 0x71, + 0xbc, 0x11, 0x8f, 0xfa, 0xf3, 0x35, 0xf5, 0x16, 0x6b, 0xea, 0x2d, 0xd7, 0xd4, 0x7b, 0xca, 0x29, + 0x99, 0xe7, 0x94, 0x2c, 0x72, 0x4a, 0x96, 0x39, 0x25, 0xaf, 0x39, 0x25, 0xcf, 0x6f, 0xd4, 0xbb, + 0xde, 0x29, 0xbf, 0xf6, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x7e, 0x00, 0x08, 0x5a, 0x02, 0x00, + 0x00, } func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -106,65 +116,57 @@ func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { } func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil -} - -func (m *TableOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i += copy(dAtA[i:], m.IncludeObject) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PartialObjectMetadata) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return base } - func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Items) > 0 { @@ -173,56 +175,29 @@ func (m *PartialObjectMetadataList) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - return n -} - -func (m *TableOptions) Size() (n int) { - var l int - _ = l - l = 
len(m.IncludeObject) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) return n } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *PartialObjectMetadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadata{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *PartialObjectMetadataList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TableOptions) String() string { - if this == nil { - return "nil" + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," } - s := strings.Join([]string{`&TableOptions{`, - `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -235,7 +210,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -250,7 +225,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -258,15 +233,15 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -278,7 +253,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -287,66 +262,20 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, v1.PartialObjectMetadata{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -358,7 +287,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -367,11 +296,13 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &PartialObjectMetadata{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -384,83 +315,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TableOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -529,10 +384,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -561,6 +419,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -579,35 +440,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 375 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcd, 0x0a, 0xd3, 0x40, - 0x10, 0xc7, 0xb3, 0x48, 0xd1, 0x6e, 0xed, 0x25, 0x22, 0xd4, 0x1e, 0x36, 0xa5, 0xa7, 0x0a, 0x76, - 0xd7, 0x16, 0x11, 0x8f, 0x92, 0x5b, 0x41, 0x69, 0x09, 0x9e, 0x3c, 0xb9, 0x49, 0xc6, 0x74, 0xcd, - 0xc7, 0x86, 0xec, 0xa6, 0xd0, 0x8b, 0xf8, 0x08, 0x3e, 0x56, 0x8f, 0x3d, 0xf6, 0x14, 0x6c, 0x7c, - 0x0b, 0x4f, 0x92, 0x0f, 0xec, 0x87, 0x15, 0x7b, 0x9b, 0xf9, 0x0f, 0xbf, 0x5f, 0x66, 0xb2, 0xd8, - 0x09, 0xdf, 0x28, 0x2a, 0x24, 0x0b, 0x73, 0x17, 0xb2, 0x04, 0x34, 0x28, 0xb6, 0x81, 0xc4, 0x97, - 0x19, 0x6b, 0x07, 0x3c, 0x15, 0x31, 0xf7, 0xd6, 0x22, 0x81, 0x6c, 0xcb, 0xd2, 0x30, 0xa8, 0x02, - 0xc5, 0x62, 0xd0, 0x9c, 0x6d, 0x66, 0x2e, 0x68, 0x3e, 0x63, 0x01, 0x24, 0x90, 0x71, 0x0d, 0x3e, - 0x4d, 0x33, 0xa9, 0xa5, 0xf9, 0xbc, 0x41, 0xe9, 0x39, 0x4a, 0xd3, 0x30, 0xa8, 0x02, 0x45, 0x2b, - 0x94, 0xb6, 0xe8, 0x70, 0x1a, 0x08, 0xbd, 0xce, 0x5d, 0xea, 0xc9, 0x98, 0x05, 0x32, 0x90, 0xac, - 0x36, 0xb8, 0xf9, 0xe7, 0xba, 0xab, 0x9b, 0xba, 0x6a, 0xcc, 0xc3, 0x57, 0xf7, 0x2c, 0x75, 0xbd, - 0xcf, 0xf0, 0x9f, 0xa7, 0x64, 0x79, 0xa2, 0x45, 0x0c, 0x7f, 0x01, 0xaf, 0xff, 0x07, 0x28, 0x6f, - 0x0d, 0x31, 0xbf, 0xe6, 0xc6, 0x5b, 0xfc, 0x74, 0xc5, 0x33, 0x2d, 0x78, 0xb4, 0x74, 0xbf, 0x80, - 0xa7, 0xdf, 0x83, 0xe6, 0x3e, 0xd7, 0xdc, 0xfc, 0x84, 0x1f, 0xc5, 0x6d, 0x3d, 0x40, 0x23, 0x34, - 0xe9, 0xcd, 0x5f, 0xd2, 0x7b, 0x7e, 0x12, 0x3d, 0x79, 0x6c, 0x73, 0x57, 0x58, 0x46, 0x59, 0x58, - 0xf8, 0x94, 0x39, 0x7f, 0xac, 0xe3, 0xaf, 0xf8, 0xd9, 0xcd, 0x4f, 0xbf, 0x13, 0x4a, 0x9b, 0x1c, - 0x77, 0x84, 0x86, 0x58, 0x0d, 0xd0, 0xe8, 0xc1, 0xa4, 0x37, 0x7f, 0x4b, 0xef, 0x7e, 0x20, 0x7a, - 0x53, 0x6a, 0x77, 0xcb, 0xc2, 0xea, 0x2c, 0x2a, 0xa5, 0xd3, 0x98, 0xc7, 0x2e, 0x7e, 0xfc, 0x81, - 0xbb, 0x11, 0x2c, 0x53, 0x2d, 0x64, 0xa2, 0x4c, 0x07, 0xf7, 0x45, 0xe2, 0x45, 0xb9, 0x0f, 0x0d, - 0x5a, 0x9f, 0xdd, 0xb5, 0x5f, 0xb4, 0x47, 0xf4, 0x17, 0xe7, 0xc3, 0x5f, 0x85, 0xf5, 0xe4, 0x22, - 0x58, 0xc9, 0x48, 0x78, 0x5b, 0xe7, 0x52, 0x61, 0x4f, 0x77, 0x47, 0x62, 0xec, 0x8f, 0xc4, 0x38, - 0x1c, 0x89, 0xf1, 0xad, 0x24, 0x68, 0x57, 0x12, 0xb4, 0x2f, 0x09, 0x3a, 0x94, 0x04, 0xfd, 0x28, - 0x09, 0xfa, 0xfe, 0x93, 0x18, 0x1f, 0x1f, 0xb6, 0xab, 0xff, 0x0e, 0x00, 0x00, 0xff, 
0xff, 0xf3, - 0xe1, 0xde, 0x86, 0xdb, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 83be9979..19606666 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -28,30 +28,15 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadata { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; -} + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadataList { // items contains each of the included items. - repeated PartialObjectMetadata items = 1; -} - -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message TableOptions { - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - optional string includeObject = 1; + repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go index d13254b4..108a0764 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -39,6 +39,12 @@ var scheme = runtime.NewScheme() var ParameterCodec = runtime.NewParameterCodec(scheme) func init() { + if err := AddMetaToScheme(scheme); err != nil { + panic(err) + } +} + +func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, &TableOptions{}, @@ -46,11 +52,9 @@ func init() { &PartialObjectMetadataList{}, ) - if err := scheme.AddConversionFuncs( + return scheme.AddConversionFuncs( Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, - ); err != nil { - panic(err) - } + ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) 
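Editor's note on the register.go hunk above: it factors the type registration out of init into an exported AddMetaToScheme, so callers can add the meta.k8s.io/v1beta1 kinds (Table, TableOptions, PartialObjectMetadata, PartialObjectMetadataList) to a runtime.Scheme they own. The following is a hedged, illustrative sketch only and is not part of this patch; the main wrapper, import aliases, and printed output are assumptions, while AddMetaToScheme, SchemeGroupVersion, and the registered kinds come from the hunk itself.

package main

import (
	"fmt"

	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the meta.k8s.io/v1beta1 kinds into a caller-owned scheme
	// via the newly exported AddMetaToScheme, instead of relying on the
	// package-level init alone.
	scheme := runtime.NewScheme()
	if err := metav1beta1.AddMetaToScheme(scheme); err != nil {
		panic(err)
	}

	// The registered kinds are now resolvable through the scheme, e.g. for
	// a metadata-only list object.
	gvks, _, err := scheme.ObjectKinds(&metav1beta1.PartialObjectMetadataList{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // expected to include meta.k8s.io/v1beta1, Kind=PartialObjectMetadataList
}

Keeping the registration in a named function mirrors the SchemeBuilder pattern used elsewhere in apimachinery, which the in-file comment notes cannot be used here.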
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 344c533e..f16170a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -18,144 +18,67 @@ limitations under the License. package v1beta1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf -// generation to support a meta type that can accept any valid JSON. - // Table is a tabular representation of a set of API resources. The server transforms the // object into a set of preferred columns for quickly reviewing the objects. -// +protobuf=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Table struct { - v1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - v1.ListMeta `json:"metadata,omitempty"` - - // columnDefinitions describes each column in the returned items array. The number of cells per row - // will always match the number of column definitions. - ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` - // rows is the list of items in the table. - Rows []TableRow `json:"rows"` -} +// +protobuf=false +type Table = v1.Table // TableColumnDefinition contains information about a column returned in the Table. // +protobuf=false -type TableColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name"` - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string `json:"type"` - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string `json:"format"` - // description is a human readable description of this column. - Description string `json:"description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. - Priority int32 `json:"priority"` -} +type TableColumnDefinition = v1.TableColumnDefinition // TableRow is an individual row in a table. // +protobuf=false -type TableRow struct { - // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple - // maps, or lists, or null. See the type field of the column definition for a more detailed description. - Cells []interface{} `json:"cells"` - // conditions describe additional status of a row that are relevant for a human user. - // +optional - Conditions []TableRowCondition `json:"conditions,omitempty"` - // This field contains the requested additional information about each object based on the includeObject - // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the - // default serialization of the object for the current API version, and if "Metadata" (the default) will - // contain the object metadata. 
Check the returned kind and apiVersion of the object before parsing. - // +optional - Object runtime.RawExtension `json:"object,omitempty"` -} +type TableRow = v1.TableRow // TableRowCondition allows a row to be marked with additional information. // +protobuf=false -type TableRowCondition struct { - // Type of row condition. - Type RowConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` - // (brief) machine readable reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} +type TableRowCondition = v1.TableRowCondition -type RowConditionType string +type RowConditionType = v1.RowConditionType -// These are valid conditions of a row. This list is not exhaustive and new conditions may be -// included by other resources. -const ( - // RowCompleted means the underlying resource has reached completion and may be given less - // visual priority than other resources. - RowCompleted RowConditionType = "Completed" -) +type ConditionStatus = v1.ConditionStatus -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// IncludeObjectPolicy controls which portion of the object is returned with a Table. -type IncludeObjectPolicy string - -const ( - // IncludeNone returns no object. - IncludeNone IncludeObjectPolicy = "None" - // IncludeMetadata serializes the object containing only its metadata field. - IncludeMetadata IncludeObjectPolicy = "Metadata" - // IncludeObject contains the full object. - IncludeObject IncludeObjectPolicy = "Object" -) +type IncludeObjectPolicy = v1.IncludeObjectPolicy // TableOptions are used when a Table is requested by the caller. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions struct { - v1.TypeMeta `json:",inline"` - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` -} +type TableOptions = v1.TableOptions // PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients // to get access to a particular ObjectMeta schema without knowing the details of the version. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata struct { - v1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` -} +type PartialObjectMetadata = v1.PartialObjectMetadata + +// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than +// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must +// remain independent of v1.PartialObjectMetadataList to preserve mappings. -// PartialObjectMetadataList contains a list of objects containing only their metadata +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type PartialObjectMetadataList struct { v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` // items contains each of the included items. - Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` + Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` } + +const ( + RowCompleted = v1.RowCompleted + + ConditionTrue = v1.ConditionTrue + ConditionFalse = v1.ConditionFalse + ConditionUnknown = v1.ConditionUnknown + + IncludeNone = v1.IncludeNone + IncludeMetadata = v1.IncludeMetadata + IncludeObject = v1.IncludeObject +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index 7394535d..ef7e7c1e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -27,78 +27,14 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PartialObjectMetadata = map[string]string{ - "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", -} - -func (PartialObjectMetadata) SwaggerDoc() map[string]string { - return map_PartialObjectMetadata -} - var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata", - "items": "items contains each of the included items.", + "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", } func (PartialObjectMetadataList) SwaggerDoc() map[string]string { return map_PartialObjectMetadataList } -var map_Table = map[string]string{ - "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", - "rows": "rows is the list of items in the table.", -} - -func (Table) SwaggerDoc() map[string]string { - return map_Table -} - -var map_TableColumnDefinition = map[string]string{ - "": "TableColumnDefinition contains information about a column returned in the Table.", - "name": "name is a human readable name for the column.", - "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "description": "description is a human readable description of this column.", - "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", -} - -func (TableColumnDefinition) SwaggerDoc() map[string]string { - return map_TableColumnDefinition -} - -var map_TableOptions = map[string]string{ - "": "TableOptions are used when a Table is requested by the caller.", - "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", -} - -func (TableOptions) SwaggerDoc() map[string]string { - return map_TableOptions -} - -var map_TableRow = map[string]string{ - "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", - "conditions": "conditions describe additional status of a row that are relevant for a human user.", - "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. 
Check the returned kind and apiVersion of the object before parsing.", -} - -func (TableRow) SwaggerDoc() map[string]string { - return map_TableRow -} - -var map_TableRowCondition = map[string]string{ - "": "TableRowCondition allows a row to be marked with additional information.", - "type": "Type of row condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "reason": "(brief) machine readable reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (TableRowCondition) SwaggerDoc() map[string]string { - return map_TableRowCondition -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index b77db1b1..89053b98 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -21,48 +21,20 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. -func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { - if in == nil { - return nil - } - out := new(PartialObjectMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { *out = *in out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*PartialObjectMetadata, len(*in)) + *out = make([]v1.PartialObjectMetadata, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PartialObjectMetadata) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } return @@ -85,105 +57,3 @@ func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Table) DeepCopyInto(out *Table) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.ColumnDefinitions != nil { - in, out := &in.ColumnDefinitions, &out.ColumnDefinitions - *out = make([]TableColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Rows != nil { - in, out := &in.Rows, &out.Rows - *out = make([]TableRow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. 
-func (in *Table) DeepCopy() *Table { - if in == nil { - return nil - } - out := new(Table) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Table) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. -func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { - if in == nil { - return nil - } - out := new(TableColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableOptions) DeepCopyInto(out *TableOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. -func (in *TableOptions) DeepCopy() *TableOptions { - if in == nil { - return nil - } - out := new(TableOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TableOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRow) DeepCopyInto(out *TableRow) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. 
-func (in *TableRowCondition) DeepCopy() *TableRowCondition { - if in == nil { - return nil - } - out := new(TableRowCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index b3804aa4..2f0dd007 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -54,10 +54,6 @@ func jsonTag(field reflect.StructField) (string, bool) { return tag, omitempty } -func formatValue(value interface{}) string { - return fmt.Sprintf("%v", value) -} - func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 32db4d96..abf3ace6 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -172,7 +172,7 @@ func ConvertSelectorToLabelsMap(selector string) (Set, error) { return labelsMap, err } value := strings.TrimSpace(l[1]) - if err := validateLabelValue(value); err != nil { + if err := validateLabelValue(key, value); err != nil { return labelsMap, err } labelsMap[key] = value diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index f5a08889..9be9e57d 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -162,7 +162,7 @@ func NewRequirement(key string, op selection.Operator, vals []string) (*Requirem } for i := range vals { - if err := validateLabelValue(vals[i]); err != nil { + if err := validateLabelValue(key, vals[i]); err != nil { return nil, err } } @@ -837,9 +837,9 @@ func validateLabelKey(k string) error { return nil } -func validateLabelValue(v string) error { +func validateLabelValue(k, v string) error { if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 80343081..b3e8a53b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -94,7 +94,7 @@ func parseBool(key string) bool { } value, err := strconv.ParseBool(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key)) } return value } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 322b0313..be0c5edc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -120,3 +120,32 @@ func IsMissingVersion(err error) bool { _, ok := err.(*missingVersionErr) return ok } + +// strictDecodingError is a base error type that is returned by a strict Decoder such +// as UniversalStrictDecoder. +type strictDecodingError struct { + message string + data string +} + +// NewStrictDecodingError creates a new strictDecodingError object. 
+func NewStrictDecodingError(message string, data string) error { + return &strictDecodingError{ + message: message, + data: data, + } +} + +func (e *strictDecodingError) Error() string { + return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) +} + +// IsStrictDecodingError returns true if the error indicates that the provided object +// strictness violations. +func IsStrictDecodingError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*strictDecodingError) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 9b15989c..af2f076b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -17,27 +17,19 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ package runtime -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,27 +42,132 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{0} +} +func (m *RawExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawExtension.Merge(m, src) +} +func (m *RawExtension) XXX_Size() int { + return m.Size() +} +func (m *RawExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RawExtension.DiscardUnknown(m) +} -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_RawExtension proto.InternalMessageInfo -func (m *Unknown) Reset() { *m = Unknown{} } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{1} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{2} +} +func (m *Unknown) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Unknown) XXX_Merge(src proto.Message) { + xxx_messageInfo_Unknown.Merge(m, src) +} +func (m *Unknown) XXX_Size() int { + return m.Size() +} +func (m *Unknown) XXX_DiscardUnknown() { + xxx_messageInfo_Unknown.DiscardUnknown(m) +} + +var xxx_messageInfo_Unknown proto.InternalMessageInfo func init() { proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) +} + +var fileDescriptor_9d3c45d7f546725c = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, + 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 
0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} + func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -78,23 +175,29 @@ func (m *RawExtension) Marshal() (dAtA []byte, err error) { } func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -102,25 +205,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Unknown) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -128,45 +238,60 @@ func (m *Unknown) Marshal() (dAtA []byte, err error) { } func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + i -= len(m.ContentEncoding) + copy(dAtA[i:], m.ContentEncoding) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i-- + dAtA[i] = 0x1a if m.Raw != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) - i += 
copy(dAtA[i:], m.ContentEncoding) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) - i += copy(dAtA[i:], m.ContentType) - return i, nil + { + size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *RawExtension) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -177,6 +302,9 @@ func (m *RawExtension) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIVersion) @@ -187,6 +315,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *Unknown) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.TypeMeta.Size() @@ -203,14 +334,7 @@ func (m *Unknown) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -272,7 +396,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -300,7 +424,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -309,6 +433,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -326,6 +453,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -353,7 +483,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -381,7 +511,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -391,6 +521,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -410,7 +543,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +553,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -434,6 +570,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) 
> l { return io.ErrUnexpectedEOF } @@ -461,7 +600,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -489,7 +628,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -498,6 +637,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -519,7 +661,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -528,6 +670,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -550,7 +695,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -560,6 +705,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -579,7 +727,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -589,6 +737,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,6 +754,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -669,10 +823,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -701,6 +858,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -719,35 +879,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 378 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, - 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, - 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, - 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, - 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 
0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, - 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, - 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, - 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, - 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, - 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, - 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, - 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, - 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, - 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, - 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, - 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, - 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, - 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, - 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, - 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, - 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, - 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, - 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 33f11eb1..7bd1a3a6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { func SetField(src interface{}, v reflect.Value, fieldName string) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } srcValue := reflect.ValueOf(src) if srcValue.Type().AssignableTo(field.Type()) { @@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error { func Field(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } destValue, err := conversion.EnforcePtr(dest) if err != nil { @@ -93,7 +93,7 @@ func Field(v reflect.Value, fieldName string, dest interface{}) error { func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } v, err := conversion.EnforcePtr(dest) if err != nil { @@ -210,3 +210,50 @@ type defaultFramer struct{} func (defaultFramer) NewFrameReader(r io.ReadCloser) 
io.ReadCloser { return r } func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } + +// WithVersionEncoder serializes an object and ensures the GVK is set. +type WithVersionEncoder struct { + Version GroupVersioner + Encoder + ObjectTyper +} + +// Encode does not do conversion. It sets the gvk during serialization. +func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// WithoutVersionDecoder clears the group version kind of a deserialized object. +type WithoutVersionDecoder struct { + Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. +func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 699ff13e..bded5bf1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -91,6 +91,10 @@ type Framer interface { type SerializerInfo struct { // MediaType is the value that represents this serializer over the wire. MediaType string + // MediaTypeType is the first part of the MediaType ("application" in "application/json"). + MediaTypeType string + // MediaTypeSubType is the second part of the MediaType ("json" in "application/json"). + MediaTypeSubType string // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. EncodesAsText bool // Serializer is the individual object serializer for this media type. @@ -206,6 +210,25 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } +// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource +type EquivalentResourceMapper interface { + // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource. + // If subresource is specified, only equivalent resources which also have the same subresource are included. + // The specified resource can be included in the returned list. + EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource + // KindFor returns the kind expected by the specified resource[/subresource]. + // A zero value is returned if the kind is unknown. 
+ KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind +} + +// EquivalentResourceRegistry provides an EquivalentResourceMapper interface, +// and allows registering known resource[/subresource] -> kind +type EquivalentResourceRegistry interface { + EquivalentResourceMapper + // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind. + RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) +} + // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -237,6 +260,9 @@ type Object interface { // to JSON allowed. type Unstructured interface { Object + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. + // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. + NewEmptyInstance() Unstructured // UnstructuredContent returns a non-nil map with this object's contents. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. SetUnstructuredContent should be used to mutate the contents. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go new file mode 100644 index 00000000..3ff84611 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type equivalentResourceRegistry struct { + // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups). + // if null, or if "" is returned, resource.String() is used as the key + keyFunc func(resource schema.GroupResource) string + // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources). + // main resources are stored with subresource="". + resources map[string]map[string][]schema.GroupVersionResource + // kinds maps resource -> subresource -> kind + kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind + // keys caches the computed key for each GroupResource + keys map[schema.GroupResource]string + + mutex sync.RWMutex +} + +var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil) +var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil) + +// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent. +func NewEquivalentResourceRegistry() EquivalentResourceRegistry { + return &equivalentResourceRegistry{} +} + +// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function. 
+// If "" is returned by the function, GroupResource#String is used as the identity. +// GroupResources with the same identity string are considered equivalent. +func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry { + return &equivalentResourceRegistry{keyFunc: keyFunc} +} + +func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.resources[r.keys[resource.GroupResource()]][subresource] +} +func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.kinds[resource][subresource] +} +func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.kinds == nil { + r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{} + } + if r.kinds[resource] == nil { + r.kinds[resource] = map[string]schema.GroupVersionKind{} + } + r.kinds[resource][subresource] = kind + + // get the shared key of the parent resource + key := "" + gr := resource.GroupResource() + if r.keyFunc != nil { + key = r.keyFunc(gr) + } + if key == "" { + key = gr.String() + } + + if r.keys == nil { + r.keys = map[schema.GroupResource]string{} + } + r.keys[gr] = key + + if r.resources == nil { + r.resources = map[string]map[string][]schema.GroupVersionResource{} + } + if r.resources[key] == nil { + r.resources[key] = map[string][]schema.GroupVersionResource{} + } + r.resources[key][subresource] = append(r.resources[key][subresource], resource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 28a61d5f..a7276649 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -17,19 +17,15 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -/* -Package schema is a generated protocol buffer package. +package schema -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +import ( + fmt "fmt" -It has these top-level messages: -*/ -package schema + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -43,10 +39,10 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_0462724132518e0d = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 65f45112..f21b0ef1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,9 +17,13 @@ limitations under the License. package serializer import ( + "mime" + "strings" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) @@ -44,30 +48,53 @@ type serializerType struct { StreamSerializer runtime.Serializer } -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { - jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) - jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) - yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { + jsonSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, + ) + jsonSerializerType := serializerType{ + AcceptContentTypes: []string{runtime.ContentTypeJSON}, + ContentType: runtime.ContentTypeJSON, + FileExtensions: []string{"json"}, + EncodesAsText: true, + Serializer: jsonSerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, + } + if options.Pretty { + jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict}, + ) + } - serializers := []serializerType{ - { - AcceptContentTypes: []string{"application/json"}, - ContentType: "application/json", - FileExtensions: []string{"json"}, - EncodesAsText: true, - Serializer: jsonSerializer, - PrettySerializer: jsonPrettySerializer, + yamlSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, + ) + protoSerializer := protobuf.NewSerializer(scheme, scheme) + protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) - Framer: json.Framer, - StreamSerializer: jsonSerializer, - }, + serializers := []serializerType{ + jsonSerializerType, { - AcceptContentTypes: []string{"application/yaml"}, - ContentType: "application/yaml", + AcceptContentTypes: []string{runtime.ContentTypeYAML}, + ContentType: runtime.ContentTypeYAML, FileExtensions: []string{"yaml"}, EncodesAsText: true, Serializer: yamlSerializer, }, + { + 
AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, + ContentType: runtime.ContentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: protoSerializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: protoRawSerializer, + }, } for _, fn := range serializerExtensions { @@ -89,14 +116,56 @@ type CodecFactory struct { legacySerializer runtime.Serializer } +// CodecFactoryOptions holds the options for configuring CodecFactory behavior +type CodecFactoryOptions struct { + // Strict configures all serializers in strict mode + Strict bool + // Pretty includes a pretty serializer along with the non-pretty one + Pretty bool +} + +// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. +// Functions implementing this type can be passed to the NewCodecFactory() constructor. +type CodecFactoryOptionsMutator func(*CodecFactoryOptions) + +// EnablePretty enables including a pretty serializer along with the non-pretty one +func EnablePretty(options *CodecFactoryOptions) { + options.Pretty = true +} + +// DisablePretty disables including a pretty serializer along with the non-pretty one +func DisablePretty(options *CodecFactoryOptions) { + options.Pretty = false +} + +// EnableStrict enables configuring all serializers in strict mode +func EnableStrict(options *CodecFactoryOptions) { + options.Strict = true +} + +// DisableStrict disables configuring all serializers in strict mode +func DisableStrict(options *CodecFactoryOptions) { + options.Strict = false +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and // only convert objects which are shared internally (Status, common API machinery). +// +// Mutators can be passed to change the CodecFactoryOptions before construction of the factory. +// It is recommended to explicitly pass mutators instead of relying on defaults. +// By default, Pretty is enabled -- this is conformant with previously supported behavior. +// // TODO: allow other codecs to be compiled in? // TODO: accept a scheme interface -func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { - serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) +func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory { + options := CodecFactoryOptions{Pretty: true} + for _, fn := range mutators { + fn(&options) + } + + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options) return newCodecFactory(scheme, serializers) } @@ -120,6 +189,15 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec Serializer: d.Serializer, PrettySerializer: d.PrettySerializer, } + + mediaType, _, err := mime.ParseMediaType(info.MediaType) + if err != nil { + panic(err) + } + parts := strings.SplitN(mediaType, "/", 2) + info.MediaTypeType = parts[0] + info.MediaTypeSubType = parts[1] + if d.StreamSerializer != nil { info.StreamSerializer = &runtime.StreamSerializerInfo{ Serializer: d.StreamSerializer, @@ -148,6 +226,12 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } } +// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the +// caller requests it. 
+func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer { + return WithoutConversionCodecFactory{f} +} + // SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { return f.accepts @@ -215,23 +299,26 @@ func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.Grou return f.CodecForVersions(encoder, nil, gv, nil) } -// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. -type DirectCodecFactory struct { +// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion. +// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future +// will be unnecessary when we change the signature of NegotiatedSerializer. +type WithoutConversionCodecFactory struct { CodecFactory } -// EncoderForVersion returns an encoder that does not do conversion. -func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { - return versioning.DirectEncoder{ +// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object +// when serialized. +func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return runtime.WithVersionEncoder{ Version: version, Encoder: serializer, ObjectTyper: f.CodecFactory.scheme, } } -// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. -func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return versioning.DirectDecoder{ +// DecoderToVersion returns an decoder that does not do conversion. +func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return runtime.WithoutVersionDecoder{ Decoder: serializer, } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 8987e74c..de1a7d67 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -35,34 +35,56 @@ import ( // NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer // is not nil, the object has the group, version, and kind fields set. +// Deprecated: use NewSerializerWithOptions instead. func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: false, - pretty: pretty, - } + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer // is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that // matches JSON, and will error if constructs are used that do not serialize to JSON. +// Deprecated: use NewSerializerWithOptions instead. 
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) +} + +// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML +// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer +// and are immutable. +func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { return &Serializer{ meta: meta, creater: creater, typer: typer, - yaml: true, + options: options, } } +// SerializerOptions holds the options which are used to configure a JSON/YAML serializer. +// example: +// (1) To configure a JSON serializer, set `Yaml` to `false`. +// (2) To configure a YAML serializer, set `Yaml` to `true`. +// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`. +type SerializerOptions struct { + // Yaml: configures the Serializer to work with JSON(false) or YAML(true). + // When `Yaml` is enabled, this serializer only supports the subset of YAML that + // matches JSON, and will error if constructs are used that do not serialize to JSON. + Yaml bool + + // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. + // This option is silently ignored when `Yaml` is `true`. + Pretty bool + + // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. + // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. + Strict bool +} + type Serializer struct { meta MetaFactory + options SerializerOptions creater runtime.ObjectCreater typer runtime.ObjectTyper - yaml bool - pretty bool } // Serializer implements Serializer @@ -100,7 +122,27 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { } iter.ReportError("DecodeNumber", err.Error()) default: + // init depth, if needed + if iter.Attachment == nil { + iter.Attachment = int(1) + } + + // remember current depth + originalAttachment := iter.Attachment + + // increment depth before descending + if i, ok := iter.Attachment.(int); ok { + iter.Attachment = i + 1 + if i > 10000 { + iter.ReportError("parse", "exceeded max depth") + return + } + } + *(*interface{})(ptr) = iter.Read() + + // restore current depth + iter.Attachment = originalAttachment } } @@ -119,11 +161,28 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// Private copy of jsoniter to try to shield against possible mutations +// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with +// the encoding/json standard library. +func StrictCaseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + DisallowUnknownFields: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} + +// Private copies of jsoniter to try to shield against possible mutations // from outside. 
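A short sketch of constructing serializers through the new options struct instead of the deprecated NewSerializer/NewYAMLSerializer constructors; the empty scheme is assumed only for illustration:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()

	// Plain JSON serializer (was NewSerializer(meta, creater, typer, false)).
	_ = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: false, Pretty: false, Strict: false})

	// YAML serializer (was NewYAMLSerializer); Pretty is ignored when Yaml is true.
	_ = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: true})

	// Strict JSON serializer: duplicate and unknown fields surface as strict decoding errors.
	_ = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Strict: true})
}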
Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. // See https://github.com/json-iterator/go/issues/265 var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() +var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -160,7 +219,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } data := originalData - if s.yaml { + if s.options.Yaml { altered, err := yaml.YAMLToJSON(data) if err != nil { return nil, nil, err @@ -216,12 +275,38 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } + + // If the deserializer is non-strict, return successfully here. + if !s.options.Strict { + return obj, actual, nil + } + + // In strict mode pass the data trough the YAMLToJSONStrict converter. + // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, + // the output would equal the input, unless there is a parsing error such as duplicate fields. + // As we know this was successful in the non-strict case, the only error that may be returned here + // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError + // the actual error is that the object contains duplicate fields. + altered, err := yaml.YAMLToJSONStrict(originalData) + if err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // As performance is not an issue for now for the strict deserializer (one has regardless to do + // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated + // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is + // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, + // the actual error is that the object contains unknown field. + strictObj := obj.DeepCopyObject() + if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // Always return the same object as the non-strict serializer to avoid any deviations. return obj, actual, nil } // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if s.yaml { + if s.options.Yaml { json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err @@ -234,7 +319,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { return err } - if s.pretty { + if s.options.Pretty { data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err @@ -248,7 +333,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { // RecognizesData implements the RecognizingDecoder interface. 
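To make the strict path above concrete, a small sketch of the duplicate-field check it leans on; sigs.k8s.io/yaml is already in this module's dependency set, and the sample document is invented for illustration:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	// Duplicate keys survive the lenient converter but fail the strict one,
	// which is how the strict serializer reports them as strictDecodingErrors.
	doc := []byte(`{"replicas": 1, "replicas": 2}`)

	if _, err := yaml.YAMLToJSON(doc); err != nil {
		fmt.Println("lenient:", err) // not reached, the last value wins
	}
	if _, err := yaml.YAMLToJSONStrict(doc); err != nil {
		fmt.Println("strict:", err) // reports the duplicated "replicas" key
	}
}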
func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.yaml { + if s.options.Yaml { // we could potentially look for '---' return false, true, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index b99ba25c..0f33e1d8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -69,22 +69,18 @@ func IsNotMarshalable(err error) bool { // NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer // is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written // as-is (any type info passed with the object will be used). -// -// This encoding scheme is experimental, and is subject to change at any time. -func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer { +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { return &Serializer{ - prefix: protoEncodingPrefix, - creater: creater, - typer: typer, - contentType: defaultContentType, + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, } } type Serializer struct { - prefix []byte - creater runtime.ObjectCreater - typer runtime.ObjectTyper - contentType string + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper } var _ runtime.Serializer = &Serializer{} @@ -138,7 +134,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { *intoUnknown = unk if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { - intoUnknown.ContentType = s.contentType + intoUnknown.ContentType = runtime.ContentTypeProtobuf } return intoUnknown, &actual, nil } @@ -207,7 +203,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalTo methods + // the more efficient Size and MarshalToSizedBuffer methods encodedSize := uint64(t.Size()) estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) data := make([]byte, estimatedSize) @@ -287,6 +283,12 @@ type bufferedMarshaller interface { runtime.ProtobufMarshaller } +// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently. +type bufferedReverseMarshaller interface { + proto.Sizer + runtime.ProtobufReverseMarshaller +} + // estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown // object with a nil RawJSON struct and the expected size of the provided buffer. The // returned size will not be correct if RawJSOn is set on unk. @@ -303,20 +305,18 @@ func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 { // encoded object, and thus is not self describing (callers must know what type is being described in order to decode). // // This encoding scheme is experimental, and is subject to change at any time. 
-func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer { +func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer { return &RawSerializer{ - creater: creater, - typer: typer, - contentType: defaultContentType, + creater: creater, + typer: typer, } } // RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying // type). type RawSerializer struct { - creater runtime.ObjectCreater - typer runtime.ObjectTyper - contentType string + creater runtime.ObjectCreater + typer runtime.ObjectTyper } var _ runtime.Serializer = &RawSerializer{} @@ -358,7 +358,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { intoUnknown.Raw = data intoUnknown.ContentEncoding = "" - intoUnknown.ContentType = s.contentType + intoUnknown.ContentType = runtime.ContentTypeProtobuf intoUnknown.SetGroupVersionKind(*actual) return intoUnknown, actual, nil } @@ -411,12 +411,28 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, if err := proto.Unmarshal(data, pb); err != nil { return nil, actual, err } + if actual != nil { + obj.GetObjectKind().SetGroupVersionKind(*actual) + } return obj, actual, nil } // Encode serializes the provided object to the given writer. Overrides is ignored. func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { + case bufferedReverseMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalToSizedBuffer methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalToSizedBuffer(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement // the more efficient Size and MarshalTo methods diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go deleted file mode 100644 index 545cf78d..00000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serializer - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" -) - -const ( - // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from - // depending on it unintentionally. - // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the - // CodecFactory on initialization. 
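Callers that previously passed a content type into the protobuf constructors need a one-line adjustment after this change; a hedged sketch, assuming the caller builds its own serializers rather than going through CodecFactory:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

func main() {
	scheme := runtime.NewScheme()

	// Old: protobuf.NewSerializer(scheme, scheme, "application/vnd.kubernetes.protobuf")
	// New: the content type is fixed; runtime.ContentTypeProtobuf names it where needed.
	s := protobuf.NewSerializer(scheme, scheme)
	raw := protobuf.NewRawSerializer(scheme, scheme)

	_, _ = s, raw
	_ = runtime.ContentTypeProtobuf // "application/vnd.kubernetes.protobuf"
}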
- contentTypeProtobuf = "application/vnd.kubernetes.protobuf" -) - -func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { - serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) - raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) - return serializerType{ - AcceptContentTypes: []string{contentTypeProtobuf}, - ContentType: contentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: serializer, - - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: raw, - }, true -} - -func init() { - serializerExtensions = append(serializerExtensions, protobufSerializer) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 00184710..ee5cb86f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -106,20 +106,13 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { return nil, gvk, err } } // if we specify a target, use generic conversion. if into != nil { - if into == obj { - if isVersioned { - return versioned, gvk, nil - } - return into, gvk, nil - } - // perform defaulting if requested if c.defaulter != nil { // create a copy to ensure defaulting is not applied to the original versioned objects @@ -133,6 +126,14 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } } + // Short-circuit conversion if the into object is same object + if into == obj { + if isVersioned { + return versioned, gvk, nil + } + return into, gvk, nil + } + if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { return nil, gvk, err } @@ -199,84 +200,33 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { return err } + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + // restore the old GVK after encoding + defer objectKind.SetGroupVersionKind(old) + if c.encodeVersion == nil || isUnversioned { if e, ok := obj.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() objectKind.SetGroupVersionKind(gvks[0]) - err = c.encoder.Encode(obj, w) - objectKind.SetGroupVersionKind(old) - return err + return c.encoder.Encode(obj, w) } // Perform a conversion if necessary - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) if err != nil { return err } if e, ok := out.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object - err = c.encoder.Encode(out, w) - // restore the 
old GVK, in case conversion returned the same object - objectKind.SetGroupVersionKind(old) - return err -} - -// DirectEncoder serializes an object and ensures the GVK is set. -type DirectEncoder struct { - Version runtime.GroupVersioner - runtime.Encoder - runtime.ObjectTyper -} - -// Encode does not do conversion. It sets the gvk during serialization. -func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { - gvks, _, err := e.ObjectTyper.ObjectKinds(obj) - if err != nil { - if runtime.IsNotRegisteredError(err) { - return e.Encoder.Encode(obj, stream) - } - return err - } - kind := obj.GetObjectKind() - oldGVK := kind.GroupVersionKind() - gvk := gvks[0] - if e.Version != nil { - preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) - if ok { - gvk = preferredGVK - } - } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err -} - -// DirectDecoder clears the group version kind of a deserialized object. -type DirectDecoder struct { - runtime.Decoder -} - -// Decode does not do conversion. It removes the gvk during deserialization. -func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - obj, gvk, err := d.Decoder.Decode(data, defaults, into) - if obj != nil { - kind := obj.GetObjectKind() - // clearing the gvk is just a convention of a codec - kind.SetGroupVersionKind(schema.GroupVersionKind{}) - } - return obj, gvk, err + return c.encoder.Encode(out, w) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 1f7f662e..2f0b6c9e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -41,8 +41,9 @@ type TypeMeta struct { } const ( - ContentTypeJSON string = "application/json" - ContentTypeYAML string = "application/yaml" + ContentTypeJSON string = "application/json" + ContentTypeYAML string = "application/yaml" + ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" ) // RawExtension is used to hold extensions in external versions. @@ -94,7 +95,7 @@ type RawExtension struct { // Raw is the underlying serialization of this object. // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` // Object can hold a representation of this extension - useful for working with versioned // structs. Object Object `json:"-"` diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index ead96ee0..a82227b2 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -24,46 +24,66 @@ type ProtobufMarshaller interface { MarshalTo(data []byte) (int, error) } +type ProtobufReverseMarshaller interface { + MarshalToSizedBuffer(data []byte) (int, error) +} + // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown -// that will contain an object that implements ProtobufMarshaller. +// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. 
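Code that referenced the removed versioning.DirectEncoder and DirectDecoder types can move to the runtime replacements used above; a sketch of the equivalent wiring, where the group/version and the JSON base serializer are illustrative assumptions:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()
	base := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{})

	gv := schema.GroupVersion{Group: "example.com", Version: "v1"}

	// Was: versioning.DirectEncoder{Version: gv, Encoder: base, ObjectTyper: scheme}
	var enc runtime.Encoder = runtime.WithVersionEncoder{Version: gv, Encoder: base, ObjectTyper: scheme}

	// Was: versioning.DirectDecoder{Decoder: base}
	var dec runtime.Decoder = runtime.WithoutVersionDecoder{Decoder: base}

	_, _ = enc, dec
}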
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + // Calculate the full size of the message. + msgSize := m.Size() + if b != nil { + msgSize += int(size) + sovGenerated(size) + 1 } - i += n1 + // Reverse marshal the fields of m. + i := msgSize + i -= len(m.ContentType) + copy(data[i:], m.ContentType) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i-- + data[i] = 0x22 + i -= len(m.ContentEncoding) + copy(data[i:], m.ContentEncoding) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i-- + data[i] = 0x1a if b != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, size) - n2, err := b.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - if uint64(n2) != size { - // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto - // struct returned would be wrong. - return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + if r, ok := b.(ProtobufReverseMarshaller); ok { + n1, err := r.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= int(size) + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of LashramOt, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } + } else { + i -= int(size) + n1, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } } - i += n2 + i = encodeVarintGenerated(data, i, size) + i-- + data[i] = 0x12 } - - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) - return i, nil + n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= n2 + i = encodeVarintGenerated(data, i, uint64(n2)) + i-- + data[i] = 0xa + return msgSize - i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 9567f900..1689e62e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -21,11 +21,18 @@ import ( "time" ) +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + // Clock allows for injecting fake or real clocks into code that // needs to do arbitrary things based on time. 
type Clock interface { - Now() time.Time - Since(time.Time) time.Duration + PassiveClock After(time.Duration) <-chan time.Time NewTimer(time.Duration) Timer Sleep(time.Duration) @@ -66,10 +73,15 @@ func (RealClock) Sleep(d time.Duration) { time.Sleep(d) } -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { lock sync.RWMutex time time.Time +} + +// FakeClock implements Clock, but returns an arbitrary time. +type FakeClock struct { + FakePassiveClock // waiters are waiting for the fake time to pass their specified time waiters []fakeClockWaiter @@ -80,29 +92,41 @@ type fakeClockWaiter struct { stepInterval time.Duration skipIfBlocked bool destChan chan time.Time - fired bool +} + +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } } func NewFakeClock(t time.Time) *FakeClock { return &FakeClock{ - time: t, + FakePassiveClock: *NewFakePassiveClock(t), } } // Now returns f's time. -func (f *FakeClock) Now() time.Time { +func (f *FakePassiveClock) Now() time.Time { f.lock.RLock() defer f.lock.RUnlock() return f.time } // Since returns time since the time in f. -func (f *FakeClock) Since(ts time.Time) time.Duration { +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { f.lock.RLock() defer f.lock.RUnlock() return f.time.Sub(ts) } +// Sets the time. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + // Fake version of time.After(d). func (f *FakeClock) After(d time.Duration) <-chan time.Time { f.lock.Lock() @@ -175,12 +199,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) { if w.skipIfBlocked { select { case w.destChan <- t: - w.fired = true default: } } else { w.destChan <- t - w.fired = true } if w.stepInterval > 0 { @@ -287,36 +309,50 @@ func (f *fakeTimer) C() <-chan time.Time { return f.waiter.destChan } -// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +// Stop conditionally stops the timer. If the timer has neither fired +// nor been stopped then this call stops the timer and returns true, +// otherwise this call returns false. This is like time.Timer::Stop. func (f *fakeTimer) Stop() bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) - for i := range f.fakeClock.waiters { - w := &f.fakeClock.waiters[i] - if w != &f.waiter { - newWaiters = append(newWaiters, *w) + // The timer has already fired or been stopped, unless it is found + // among the clock's waiters. + stopped := false + oldWaiters := f.fakeClock.waiters + newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) + seekChan := f.waiter.destChan + for i := range oldWaiters { + // Identify the timer's fakeClockWaiter by the identity of the + // destination channel, nothing else is necessarily unique and + // constant since the timer's creation. + if oldWaiters[i].destChan == seekChan { + stopped = true + } else { + newWaiters = append(newWaiters, oldWaiters[i]) } } f.fakeClock.waiters = newWaiters - return !f.waiter.fired + return stopped } -// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet -// fired, or false otherwise. +// Reset conditionally updates the firing time of the timer. 
If the +// timer has neither fired nor been stopped then this call resets the +// timer to the fake clock's "now" + d and returns true, otherwise +// this call returns false. This is like time.Timer::Reset. func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - active := !f.waiter.fired - - f.waiter.fired = false - f.waiter.targetTime = f.fakeClock.time.Add(d) - - return active + waiters := f.fakeClock.waiters + seekChan := f.waiter.destChan + for i := range waiters { + if waiters[i].destChan == seekChan { + waiters[i].targetTime = f.fakeClock.time.Add(d) + return true + } + } + return false } type Ticker interface { diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index 06042617..a006b925 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -18,16 +18,12 @@ package diff import ( "bytes" - "encoding/json" "fmt" - "reflect" - "sort" "strings" "text/tabwriter" "github.com/davecgh/go-spew/spew" - - "k8s.io/apimachinery/pkg/util/validation/field" + "github.com/google/go-cmp/cmp" ) // StringDiff diffs a and b and returns a human readable diff. @@ -50,220 +46,29 @@ func StringDiff(a, b string) string { return string(out) } -// ObjectDiff writes the two objects out as JSON and prints out the identical part of -// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. -// For debugging tests. +func legacyDiff(a, b interface{}) string { + return cmp.Diff(a, b) +} + +// ObjectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectDiff(a, b interface{}) string { - ab, err := json.Marshal(a) - if err != nil { - panic(fmt.Sprintf("a: %v", err)) - } - bb, err := json.Marshal(b) - if err != nil { - panic(fmt.Sprintf("b: %v", err)) - } - return StringDiff(string(ab), string(bb)) + return legacyDiff(a, b) } -// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, -// which shows absolutely everything by recursing into every single pointer -// (go's %#v formatters OTOH stop at a certain point). This is needed when you -// can't figure out why reflect.DeepEqual is returning false and nothing is -// showing you differences. This will. +// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectGoPrintDiff(a, b interface{}) string { - s := spew.ConfigState{DisableMethods: true} - return StringDiff( - s.Sprintf("%#v", a), - s.Sprintf("%#v", b), - ) + return legacyDiff(a, b) } +// ObjectReflectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectReflectDiff(a, b interface{}) string { - vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) - if vA.Type() != vB.Type() { - return fmt.Sprintf("type A %T and type B %T do not match", a, b) - } - diffs := objectReflectDiff(field.NewPath("object"), vA, vB) - if len(diffs) == 0 { - return "" - } - out := []string{""} - for _, d := range diffs { - elidedA, elidedB := limit(d.a, d.b, 80) - out = append(out, - fmt.Sprintf("%s:", d.path), - fmt.Sprintf(" a: %s", elidedA), - fmt.Sprintf(" b: %s", elidedB), - ) - } - return strings.Join(out, "\n") -} - -// limit: -// 1. 
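A test-oriented sketch of the reworked fake clocks, covering the new FakePassiveClock split and the timer Stop semantics described above; the start time and durations are arbitrary:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	start := time.Date(2022, 1, 14, 0, 0, 0, 0, time.UTC)

	// PassiveClock is enough for code that only reads the time.
	passive := clock.NewFakePassiveClock(start)
	passive.SetTime(start.Add(time.Hour))
	fmt.Println(passive.Since(start)) // 1h0m0s

	// FakeClock also drives timers; Step fires waiters whose target time has passed.
	fc := clock.NewFakeClock(start)
	timer := fc.NewTimer(time.Minute)
	fc.Step(2 * time.Minute)
	fmt.Println((<-timer.C()).Sub(start)) // 2m0s

	// Stop now reports whether the timer was still pending, like time.Timer.Stop.
	fmt.Println(timer.Stop()) // false, it already fired
}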
stringifies aObj and bObj -// 2. elides identical prefixes if either is too long -// 3. elides remaining content from the end if either is too long -func limit(aObj, bObj interface{}, max int) (string, string) { - elidedPrefix := "" - elidedASuffix := "" - elidedBSuffix := "" - a, b := fmt.Sprintf("%#v", aObj), fmt.Sprintf("%#v", bObj) - - if aObj != nil && bObj != nil { - if aType, bType := fmt.Sprintf("%T", aObj), fmt.Sprintf("%T", bObj); aType != bType { - a = fmt.Sprintf("%s (%s)", a, aType) - b = fmt.Sprintf("%s (%s)", b, bType) - } - } - - for { - switch { - case len(a) > max && len(a) > 4 && len(b) > 4 && a[:4] == b[:4]: - // a is too long, b has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(b) > max && len(b) > 4 && len(a) > 4 && a[:4] == b[:4]: - // b is too long, a has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(a) > max: - a = a[:max] - elidedASuffix = "..." - - case len(b) > max: - b = b[:max] - elidedBSuffix = "..." - - default: - // both are short enough - return elidedPrefix + a + elidedASuffix, elidedPrefix + b + elidedBSuffix - } - } -} - -func public(s string) bool { - if len(s) == 0 { - return false - } - return s[:1] == strings.ToUpper(s[:1]) -} - -type diff struct { - path *field.Path - a, b interface{} -} - -type orderedDiffs []diff - -func (d orderedDiffs) Len() int { return len(d) } -func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d orderedDiffs) Less(i, j int) bool { - a, b := d[i].path.String(), d[j].path.String() - if a < b { - return true - } - return false -} - -func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { - switch a.Type().Kind() { - case reflect.Struct: - var changes []diff - for i := 0; i < a.Type().NumField(); i++ { - if !public(a.Type().Field(i).Name) { - if reflect.DeepEqual(a.Interface(), b.Interface()) { - continue - } - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { - changes = append(changes, sub...) - } - } - return changes - case reflect.Ptr, reflect.Interface: - if a.IsNil() || b.IsNil() { - switch { - case a.IsNil() && b.IsNil(): - return nil - case a.IsNil(): - return []diff{{path: path, a: nil, b: b.Interface()}} - default: - return []diff{{path: path, a: a.Interface(), b: nil}} - } - } - return objectReflectDiff(path, a.Elem(), b.Elem()) - case reflect.Chan: - if !reflect.DeepEqual(a.Interface(), b.Interface()) { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - case reflect.Slice: - lA, lB := a.Len(), b.Len() - l := lA - if lB < lA { - l = lB - } - if lA == lB && lA == 0 { - if a.IsNil() != b.IsNil() { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - } - var diffs []diff - for i := 0; i < l; i++ { - if !reflect.DeepEqual(a.Index(i), b.Index(i)) { - diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...) 
- } - } - for i := l; i < lA; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) - } - for i := l; i < lB; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) - } - return diffs - case reflect.Map: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - aKeys := make(map[interface{}]interface{}) - for _, key := range a.MapKeys() { - aKeys[key.Interface()] = a.MapIndex(key).Interface() - } - var missing []diff - for _, key := range b.MapKeys() { - if _, ok := aKeys[key.Interface()]; ok { - delete(aKeys, key.Interface()) - if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { - continue - } - missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...) - continue - } - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) - } - for key, value := range aKeys { - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) - } - if len(missing) == 0 { - missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()}) - } - sort.Sort(orderedDiffs(missing)) - return missing - default: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - if !a.CanInterface() { - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } + return legacyDiff(a, b) } // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 48dd7d9c..64cbc770 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -17,22 +17,17 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -/* - Package intstr is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ package intstr -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + + io "io" + math "math" + math_bits "math/bits" -import io "io" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -45,17 +40,69 @@ var _ = math.Inf // proto package needs to be updated. 
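Because the reflection-based differ above has been replaced with thin wrappers over github.com/google/go-cmp, test code can keep calling the old helpers or switch to cmp.Diff directly; a sketch with a hypothetical struct:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/diff"
)

type taskSpec struct {
	Name     string
	Replicas int
}

func main() {
	a := taskSpec{Name: "worker", Replicas: 1}
	b := taskSpec{Name: "worker", Replicas: 2}

	// ObjectDiff, ObjectGoPrintDiff and ObjectReflectDiff now all delegate to cmp.Diff
	// and are kept only for compatibility; new code should call cmp.Diff directly.
	fmt.Println(diff.ObjectReflectDiff(a, b))
}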
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_94e046ae3ce6121c, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo func init() { proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) +} + +var fileDescriptor_94e046ae3ce6121c = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} + func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -63,33 +110,44 @@ func (m *IntOrString) Marshal() (dAtA []byte, err error) { } func (m *IntOrString) MarshalTo(dAtA 
[]byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) - dAtA[i] = 0x1a - i++ + i -= len(m.StrVal) + copy(dAtA[i:], m.StrVal) i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) - i += copy(dAtA[i:], m.StrVal) - return i, nil + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Type)) @@ -100,14 +158,7 @@ func (m *IntOrString) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -127,7 +178,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -155,7 +206,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= (Type(b) & 0x7F) << shift + m.Type |= Type(b&0x7F) << shift if b < 0x80 { break } @@ -174,7 +225,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IntVal |= (int32(b) & 0x7F) << shift + m.IntVal |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -193,7 +244,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -203,6 +254,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -217,6 +271,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -283,10 +340,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -315,6 +375,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -333,30 +396,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - 
// 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 5b26ed26..2df62955 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -45,7 +45,7 @@ type IntOrString struct { } // Type represents the stored type of IntOrString. -type Type int +type Type int64 const ( Int Type = iota // The IntOrString holds an int. @@ -122,11 +122,11 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { // the OpenAPI spec of this type. // // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
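The regenerated wire format above does not change how IntOrString is used from Go code; a small reference sketch with made-up values:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	port := intstr.FromInt(8080)
	name := intstr.FromString("http")

	// Marshals as a bare number or a quoted string, per the custom MarshalJSON.
	pb, _ := json.Marshal(port)
	nb, _ := json.Marshal(name)
	fmt.Println(string(pb), string(nb)) // 8080 "http"

	// IntValue parses numeric string values and falls back to 0 otherwise.
	fmt.Println(port.IntValue(), name.IntValue()) // 8080 0
}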
-func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 10c8cb83..20483488 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -19,6 +19,7 @@ package json import ( "bytes" "encoding/json" + "fmt" "io" ) @@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + // Unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { @@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v) + return convertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -60,25 +64,54 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v) + return convertSliceNumbers(*v, 0) + + case *interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertInterfaceNumbers(v, 0) default: return json.Unmarshal(data, v) } } +func convertInterfaceNumbers(v *interface{}, depth int) error { + var err error + switch v2 := (*v).(type) { + case json.Number: + *v, err = convertNumber(v2) + case map[string]interface{}: + err = convertMapNumbers(v2, depth+1) + case []interface{}: + err = convertSliceNumbers(v2, depth+1) + } + return err +} + // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}) error { +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for k, v := range m { switch v := v.(type) { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err @@ -89,16 +122,20 @@ func convertMapNumbers(m map[string]interface{}) error { // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. 
// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}) error { +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for i, v := range s { switch v := v.(type) { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 078f00d9..c24fbc69 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string { return result } +// IsTimeout returns true if the given error is a network timeout error +func IsTimeout(err error) bool { + neterr, ok := err.(net.Error) + return ok && neterr != nil && neterr.Timeout() +} + // IsProbableEOF returns true if the given error resembles a connection termination // scenario that would justify assuming that the watch is empty. // These errors are what the Go http stack returns back to us which are general @@ -101,6 +107,9 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout } + if t.IdleConnTimeout == 0 { + t.IdleConnTimeout = defaultTransport.IdleConnTimeout + } return t } @@ -111,7 +120,7 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { klog.Infof("HTTP2 has been explicitly disabled") - } else { + } else if allowsHTTP2(t) { if err := http2.ConfigureTransport(t); err != nil { klog.Warningf("Transport failed http2 configuration: %v", err) } @@ -119,6 +128,21 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { return t } +func allowsHTTP2(t *http.Transport) bool { + if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { + // the transport expressed no NextProto preference, allow + return true + } + for _, p := range t.TLSClientConfig.NextProtos { + if p == http2.NextProtoTLS { + // the transport explicitly allowed http/2 + return true + } + } + // the transport explicitly set NextProtos and excluded http/2 + return false +} + type RoundTripperWrapper interface { http.RoundTripper WrappedRoundTripper() http.RoundTripper @@ -407,7 +431,7 @@ redirectLoop: // Only follow redirects to the same host. Otherwise, propagate the redirect response back. if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { - break redirectLoop + return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) } // Reset the connection. 
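For reference, a sketch of the behavior the depth-limited helpers above preserve: numbers decoded through this package come back as int64 or float64 instead of always float64, and documents nested deeper than maxDepth are rejected; the input is invented for illustration:

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var obj map[string]interface{}
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &obj); err != nil {
		panic(err)
	}

	// Prints "int64 float64"; encoding/json alone would give float64 for both.
	fmt.Printf("%T %T\n", obj["replicas"], obj["ratio"])
}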
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 8344d10c..2e7cb949 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -54,3 +54,20 @@ func IsConnectionReset(err error) bool { } return false } + +// Returns if the given err is "connection refused" error +func IsConnectionRefused(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 8e34f926..1428443f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "net/http" "runtime" "sync" "time" @@ -40,11 +41,7 @@ var PanicHandlers = []func(interface{}){logPanic} // called in case of panic. HandleCrash actually crashes, after calling the // handlers and logging the panic message. // -// TODO: remove this function. We are switching to a world where it's safe for -// apiserver to panic, since it will be restarted by kubelet. At the beginning -// of the Kubernetes project, nothing was going to restart apiserver and so -// catching panics was important. But it's actually much simpler for monitoring -// software if we just exit when an unexpected panic happens. +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { for _, fn := range PanicHandlers { @@ -60,27 +57,26 @@ func HandleCrash(additionalHandlers ...func(interface{})) { } } -// logPanic logs the caller tree when a panic occurs. +// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). func logPanic(r interface{}) { - callers := getCallers(r) - if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%v", r, callers) - } else { - klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + if r == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. + // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return } -} -func getCallers(r interface{}) string { - callers := "" - for i := 0; true; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - callers = callers + fmt.Sprintf("%v:%v\n", file, line) + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + if _, ok := r.(string); ok { + klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + } else { + klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) } - - return callers } // ErrorHandlers is a list of functions which will be invoked when an unreturnable @@ -155,13 +151,17 @@ func GetCaller() string { // handlers to handle errors and panics the same way. 
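A sketch tying together the new error predicates and the documented HandleCrash usage; the endpoint, retry policy, and cleanup handler are illustrative assumptions:

package main

import (
	"fmt"
	"net/http"
	"time"

	utilnet "k8s.io/apimachinery/pkg/util/net"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	// Retry only the errors the predicates classify as transient.
	var lastErr error
	for i := 0; i < 3; i++ {
		resp, err := http.Get("http://127.0.0.1:1") // likely refused on most hosts
		if err == nil {
			resp.Body.Close()
			lastErr = nil
			break
		}
		lastErr = err
		if !utilnet.IsConnectionRefused(err) && !utilnet.IsTimeout(err) {
			break // not transient, give up
		}
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println("last error:", lastErr)

	// HandleCrash logs any panic (and runs extra handlers, e.g. for cleanup)
	// before crashing, as described in the updated doc comment.
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer utilruntime.HandleCrash(func(r interface{}) {
			fmt.Println("cleaning up after panic:", r)
		})
		// worker body would go here
	}()
	<-done
}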
func RecoverFromPanic(err *error) { if r := recover(); r != nil { - callers := getCallers(r) + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] *err = fmt.Errorf( - "recovered from panic %q. (err=%v) Call stack:\n%v", + "recovered from panic %q. (err=%v) Call stack:\n%s", r, *err, - callers) + stacktrace) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 766f4501..9bfa85d4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -46,17 +46,19 @@ func ByteKeySet(theMap interface{}) Byte { } // Insert adds items to the set. -func (s Byte) Insert(items ...byte) { +func (s Byte) Insert(items ...byte) Byte { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Byte) Delete(items ...byte) { +func (s Byte) Delete(items ...byte) Byte { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index a0a513cd..88bd7096 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -46,17 +46,19 @@ func IntKeySet(theMap interface{}) Int { } // Insert adds items to the set. -func (s Int) Insert(items ...int) { +func (s Int) Insert(items ...int) Int { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int) Delete(items ...int) { +func (s Int) Delete(items ...int) Int { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go new file mode 100644 index 00000000..96a48555 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. 
+func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) Int32 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) Int32 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. 
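The Insert/Delete signature change across the generated sets (Byte, Int, Int64, String, and the new Int32 above) lets callers chain mutations. A minimal sketch, not from the vendored sources, of what that enables:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Insert and Delete now return the receiver, so a set can be built in one expression.
	nodes := sets.NewString("node-a", "node-b").Insert("node-c").Delete("node-a")
	fmt.Println(nodes.List()) // [node-b node-c]

	// The newly generated Int32 set behaves like the other generated sets.
	shards := sets.NewInt32(0, 1, 2).Delete(0)
	fmt.Println(shards.Has(1), shards.Union(sets.NewInt32(7)).List()) // true [1 2 7]
}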
+func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 9ca9af0c..b375a1b0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -46,17 +46,19 @@ func Int64KeySet(theMap interface{}) Int64 { } // Insert adds items to the set. -func (s Int64) Insert(items ...int64) { +func (s Int64) Insert(items ...int64) Int64 { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int64) Delete(items ...int64) { +func (s Int64) Delete(items ...int64) Int64 { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index ba00ad7d..e6f37db8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -46,17 +46,19 @@ func StringKeySet(theMap interface{}) String { } // Insert adds items to the set. -func (s String) Insert(items ...string) { +func (s String) Insert(items ...string) String { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s String) Delete(items ...string) { +func (s String) Delete(items ...string) String { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go index bf478223..1fa351aa 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go +++ b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go @@ -17,27 +17,11 @@ limitations under the License. package uuid import ( - "sync" - - "github.com/pborman/uuid" + "github.com/google/uuid" "k8s.io/apimachinery/pkg/types" ) -var uuidLock sync.Mutex -var lastUUID uuid.UUID - func NewUUID() types.UID { - uuidLock.Lock() - defer uuidLock.Unlock() - result := uuid.NewUUID() - // The UUID package is naive and can generate identical UUIDs if the - // time interval is quick enough. - // The UUID uses 100 ns increments so it's short enough to actively - // wait for a new value. - for uuid.Equal(lastUUID, result) == true { - result = uuid.NewUUID() - } - lastUUID = result - return types.UID(result.String()) + return types.UID(uuid.New().String()) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 4767fd1d..1d372a52 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -116,6 +116,10 @@ const ( // This is similar to ErrorTypeInvalid, but the error will not include the // too-long value. See TooLong(). ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. 
+ ErrorTypeTooMany ErrorType = "FieldValueTooMany" // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" @@ -138,6 +142,8 @@ func (t ErrorType) String() string { return "Forbidden" case ErrorTypeTooLong: return "Too long" + case ErrorTypeTooMany: + return "Too many" case ErrorTypeInternal: return "Internal error" default: @@ -201,6 +207,13 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error { return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} } +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. +func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} +} + // InternalError returns a *Error indicating "internal error". This is used // to signal that an error was found that was not directly related to user // input. The err argument must be non-nil. diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 20417756..386c3e7e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -207,23 +207,31 @@ type ConditionFunc func() (done bool, err error) type Backoff struct { // The initial duration. Duration time.Duration - // Duration is multiplied by factor each iteration. Must be greater - // than or equal to zero. + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. Factor float64 - // The amount of jitter applied each iteration. Jitter is applied after - // cap. + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. Jitter float64 - // The number of steps before duration stops changing. If zero, initial - // duration is always used. Used for exponential backoff in combination - // with Factor. + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. Steps int - // The returned duration will never be greater than cap *before* jitter - // is applied. The actual maximum cap is `cap * (1.0 + jitter)`. + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. Cap time.Duration } -// Step returns the next interval in the exponential backoff. This method -// will mutate the provided backoff. +// Step (1) returns an amount of time to sleep determined by the +// original Duration and Jitter and (2) mutates the provided Backoff +// to update its Steps and Duration. 
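The reworked Backoff documentation above is clearer with the parameters filled in. Below is a small sketch relying only on the wait package as patched here; the particular durations and the "third try succeeds" condition are arbitrary placeholders.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // initial sleep between checks
		Factor:   2.0,                    // multiply the duration each step
		Jitter:   0.1,                    // add up to 10% random sleep on top
		Steps:    5,                      // at most 5 condition checks
		Cap:      2 * time.Second,        // stop growing the base duration at 2s
	}

	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		return attempt >= 3, nil // pretend the third try succeeds
	})
	fmt.Println("attempts:", attempt, "err:", err) // attempts: 3 err: <nil>
}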
func (b *Backoff) Step() time.Duration { if b.Steps < 1 { if b.Jitter > 0 { @@ -250,16 +258,35 @@ func (b *Backoff) Step() time.Duration { return duration } -// ExponentialBackoff repeats a condition check with exponential backoff. +// contextForChannel derives a child context from a parent channel. // -// It checks the condition up to Steps times, increasing the wait by multiplying -// the previous duration by Factor. +// The derived context's Done channel is closed when the returned cancel function +// is called or when the parent channel is closed, whichever happens first. // -// If Jitter is greater than zero, a random amount of each duration is added -// (between duration and duration*(1+jitter)). +// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. +func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + select { + case <-parentCh: + cancel() + case <-ctx.Done(): + } + }() + return ctx, cancel +} + +// ExponentialBackoff repeats a condition check with exponential backoff. // -// If the condition never returns true, ErrWaitTimeout is returned. All other -// errors terminate immediately. +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { for backoff.Steps > 0 { if ok, err := condition(); err != nil || ok { @@ -353,7 +380,9 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro // PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - return WaitFor(poller(interval, 0), condition, stopCh) + ctx, cancel := contextForChannel(stopCh) + defer cancel() + return WaitFor(poller(interval, 0), condition, ctx.Done()) } // PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. @@ -422,7 +451,9 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // timeout has elapsed and then closes the channel. // // Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity. +// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. // // Output ticks are not buffered. If the channel is not ready to receive an // item, the tick is skipped. diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index d61cf5a2..4269a836 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -17,13 +17,15 @@ limitations under the License. 
package watch import ( + "fmt" "io" "sync" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog" ) // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. @@ -39,19 +41,28 @@ type Decoder interface { Close() } +// Reporter hides the details of how an error is turned into a runtime.Object for +// reporting on a watch stream since this package may not import a higher level report. +type Reporter interface { + // AsObject must convert err into a valid runtime.Object for the watch stream. + AsObject(err error) runtime.Object +} + // StreamWatcher turns any stream for which you can write a Decoder interface // into a watch.Interface. type StreamWatcher struct { sync.Mutex - source Decoder - result chan Event - stopped bool + source Decoder + reporter Reporter + result chan Event + stopped bool } // NewStreamWatcher creates a StreamWatcher from the given decoder. -func NewStreamWatcher(d Decoder) *StreamWatcher { +func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { sw := &StreamWatcher{ - source: d, + source: d, + reporter: r, // It's easy for a consumer to add buffering via an extra // goroutine/channel, but impossible for them to remove it, // so nonbuffered is better. @@ -102,11 +113,13 @@ func (sw *StreamWatcher) receive() { case io.ErrUnexpectedEOF: klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: - msg := "Unable to decode an event from the watch stream: %v" - if net.IsProbableEOF(err) { - klog.V(5).Infof(msg, err) + if net.IsProbableEOF(err) || net.IsTimeout(err) { + klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) } else { - klog.Errorf(msg, err) + sw.result <- Event{ + Type: Error, + Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)), + } } } return diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index be9c90c0..3945be3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -44,6 +44,7 @@ const ( Added EventType = "ADDED" Modified EventType = "MODIFIED" Deleted EventType = "DELETED" + Bookmark EventType = "BOOKMARK" Error EventType = "ERROR" DefaultChanSize int32 = 100 @@ -57,6 +58,10 @@ type Event struct { // Object is: // * If Type is Added or Modified: the new state of the object. // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Bookmark: the object (instance of a type being watched) where + // only ResourceVersion field is set. On successful restart of watch from a + // bookmark resourceVersion, client is guaranteed to not get repeat event + // nor miss any events. // * If Type is Error: *api.Status is recommended; other types may make sense // depending on context. Object runtime.Object diff --git a/vendor/k8s.io/client-go/discovery/doc.go b/vendor/k8s.io/client-go/discovery/doc.go index 76495588..6baa1ef2 100644 --- a/vendor/k8s.io/client-go/discovery/doc.go +++ b/vendor/k8s.io/client-go/discovery/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package discovery provides ways to discover server-supported // API groups, versions and resources. 
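The watch changes above add a BOOKMARK event type and route decode failures through a Reporter as ERROR events on the result channel. The following sketch, not part of the vendored patch, shows how a consumer might handle such a stream; the kubeconfig path and namespace are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	w, err := client.CoreV1().Pods("default").Watch(metav1.ListOptions{AllowWatchBookmarks: true})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	for event := range w.ResultChan() {
		switch event.Type {
		case watch.Bookmark:
			// Only ResourceVersion is guaranteed to be set on a bookmark object.
			fmt.Println("bookmark at resourceVersion", event.Object.(*corev1.Pod).ResourceVersion)
		case watch.Added, watch.Modified, watch.Deleted:
			fmt.Println(event.Type, event.Object.(*corev1.Pod).Name)
		case watch.Error:
			fmt.Printf("watch error reported by the server or decoder: %v\n", event.Object)
			return
		}
	}
}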
-package discovery +package discovery // import "k8s.io/client-go/discovery" diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go index f56fe31e..14a6db43 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -19,12 +19,15 @@ limitations under the License. package admissionregistration import ( + v1 "k8s.io/client-go/informers/admissionregistration/v1" v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -40,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go new file mode 100644 index 00000000..1ecae9ec --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. + MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. 
+func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer { + return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { + return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..4fadd9a2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for +// MutatingWebhookConfigurations. +type MutatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.MutatingWebhookConfigurationLister +} + +type mutatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistrationv1.MutatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *mutatingWebhookConfigurationInformer) Lister() v1.MutatingWebhookConfigurationLister { + return v1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..1c648e60 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for +// ValidatingWebhookConfigurations. 
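With the admissionregistration.k8s.io/v1 informers introduced above, the shared informer factory can serve cached webhook configurations. A minimal sketch, not from the vendored sources; the kubeconfig path and resync period are hypothetical.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	webhooks := factory.Admissionregistration().V1().MutatingWebhookConfigurations()
	informer := webhooks.Informer()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		panic("informer cache never synced")
	}

	configs, err := webhooks.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("mutating webhook configurations in cache:", len(configs))
}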
+type ValidatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ValidatingWebhookConfigurationLister +} + +type validatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistrationv1.ValidatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *validatingWebhookConfigurationInformer) Lister() v1.ValidatingWebhookConfigurationLister { + return v1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/interface.go b/vendor/k8s.io/client-go/informers/discovery/interface.go new file mode 100644 index 00000000..eb8062fe --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package discovery + +import ( + v1alpha1 "k8s.io/client-go/informers/discovery/v1alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go new file mode 100644 index 00000000..a545ce15 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/discovery/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// EndpointSliceInformer provides access to a shared informer and lister for +// EndpointSlices. +type EndpointSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.EndpointSliceLister +} + +type endpointSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointSliceInformer constructs a new informer for EndpointSlice type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1alpha1().EndpointSlices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1alpha1().EndpointSlices(namespace).Watch(options) + }, + }, + &discoveryv1alpha1.EndpointSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&discoveryv1alpha1.EndpointSlice{}, f.defaultInformer) +} + +func (f *endpointSliceInformer) Lister() v1alpha1.EndpointSliceLister { + return v1alpha1.NewEndpointSliceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go new file mode 100644 index 00000000..711dcae5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // EndpointSlices returns a EndpointSliceInformer. + EndpointSlices() EndpointSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// EndpointSlices returns a EndpointSliceInformer. 
+func (v *version) EndpointSlices() EndpointSliceInformer { + return &endpointSliceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go index a259d27a..6f0bea7e 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -30,6 +30,8 @@ type Interface interface { Deployments() DeploymentInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer + // NetworkPolicies returns a NetworkPolicyInformer. + NetworkPolicies() NetworkPolicyInformer // PodSecurityPolicies returns a PodSecurityPolicyInformer. PodSecurityPolicies() PodSecurityPolicyInformer // ReplicaSets returns a ReplicaSetInformer. @@ -62,6 +64,11 @@ func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// NetworkPolicies returns a NetworkPolicyInformer. +func (v *version) NetworkPolicies() NetworkPolicyInformer { + return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // PodSecurityPolicies returns a PodSecurityPolicyInformer. func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go new file mode 100644 index 00000000..92f4f040 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkPolicyInformer provides access to a shared informer and lister for +// NetworkPolicies. +type NetworkPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.NetworkPolicyLister +} + +type networkPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
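These generated informer constructors all come in a Filtered variant that accepts a tweakListOptions hook, as seen for EndpointSlices above and for NetworkPolicies just below. A small sketch of using that hook to narrow a namespaced NetworkPolicy informer; the kubeconfig path and label value are hypothetical.

package main

import (
	"fmt"
	"time"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	informersv1beta1 "k8s.io/client-go/informers/extensions/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Only list/watch NetworkPolicies carrying a (hypothetical) team label.
	tweak := func(options *metav1.ListOptions) {
		options.LabelSelector = "team=frameworkcontroller"
	}
	informer := informersv1beta1.NewFilteredNetworkPolicyInformer(
		client, "default", 30*time.Second, cache.Indexers{}, tweak)

	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("network policy added:", obj.(*extensionsv1beta1.NetworkPolicy).Name)
		},
	})

	stopCh := make(chan struct{})
	// Run blocks until stopCh is closed; a real controller would manage this lifecycle.
	informer.Run(stopCh)
}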
+func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(options) + }, + }, + &extensionsv1beta1.NetworkPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensionsv1beta1.NetworkPolicy{}, f.defaultInformer) +} + +func (f *networkPolicyInformer) Lister() v1beta1.NetworkPolicyLister { + return v1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index b3a04300..c5230a49 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -34,6 +34,7 @@ import ( certificates "k8s.io/client-go/informers/certificates" coordination "k8s.io/client-go/informers/coordination" core "k8s.io/client-go/informers/core" + discovery "k8s.io/client-go/informers/discovery" events "k8s.io/client-go/informers/events" extensions "k8s.io/client-go/informers/extensions" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" @@ -196,6 +197,7 @@ type SharedInformerFactory interface { Certificates() certificates.Interface Coordination() coordination.Interface Core() core.Interface + Discovery() discovery.Interface Events() events.Interface Extensions() extensions.Interface Networking() networking.Interface @@ -239,6 +241,10 @@ func (f *sharedInformerFactory) Core() core.Interface { return core.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Discovery() discovery.Interface { + return discovery.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Events() events.Interface { return events.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index fd5811cd..58f39460 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -21,8 +21,9 @@ package informers import ( "fmt" + v1 "k8s.io/api/admissionregistration/v1" 
v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" v1alpha1 "k8s.io/api/auditregistration/v1alpha1" @@ -36,6 +37,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" @@ -83,22 +85,28 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=admissionregistration.k8s.io, Version=v1beta1 + // Group=admissionregistration.k8s.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().MutatingWebhookConfigurations().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil + + // Group=admissionregistration.k8s.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil // Group=apps, Version=v1 - case v1.SchemeGroupVersion.WithResource("controllerrevisions"): + case appsv1.SchemeGroupVersion.WithResource("controllerrevisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ControllerRevisions().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("daemonsets"): + case appsv1.SchemeGroupVersion.WithResource("daemonsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DaemonSets().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("deployments"): + case appsv1.SchemeGroupVersion.WithResource("deployments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().Deployments().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("replicasets"): + case appsv1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ReplicaSets().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("statefulsets"): + case appsv1.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().StatefulSets().Informer()}, nil // Group=apps, Version=v1beta1 @@ -195,6 +203,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case corev1.SchemeGroupVersion.WithResource("serviceaccounts"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ServiceAccounts().Informer()}, nil + // Group=discovery.k8s.io, Version=v1alpha1 + case 
discoveryv1alpha1.SchemeGroupVersion.WithResource("endpointslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1alpha1().EndpointSlices().Informer()}, nil + // Group=events.k8s.io, Version=v1beta1 case eventsv1beta1.SchemeGroupVersion.WithResource("events"): return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil @@ -206,6 +218,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Deployments().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil + case extensionsv1beta1.SchemeGroupVersion.WithResource("networkpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().NetworkPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index fb889e6d..f8a237f6 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -19,7 +19,10 @@ limitations under the License. package kubernetes import ( + "fmt" + discovery "k8s.io/client-go/discovery" + admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" @@ -39,6 +42,7 @@ import ( coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" @@ -62,6 +66,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface + AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface @@ -81,6 +86,7 @@ type Interface interface { CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface + DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface @@ -104,6 +110,7 @@ type Interface interface { // version included in a Clientset. 
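The regenerated Clientset above exposes two additional typed API groups, admissionregistration.k8s.io/v1 and discovery.k8s.io/v1alpha1. A brief sketch, not part of the vendored patch, of calling them; the kubeconfig path and namespace are hypothetical.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// admissionregistration.k8s.io/v1 is now a first-class typed client ...
	hooks, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	// ... and so is discovery.k8s.io/v1alpha1 with EndpointSlices.
	slices, err := client.DiscoveryV1alpha1().EndpointSlices("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	fmt.Printf("%d webhook configurations, %d endpoint slices\n", len(hooks.Items), len(slices.Items))
}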
type Clientset struct { *discovery.DiscoveryClient + admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client appsV1 *appsv1.AppsV1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client @@ -123,6 +130,7 @@ type Clientset struct { coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client + discoveryV1alpha1 *discoveryv1alpha1.DiscoveryV1alpha1Client eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client networkingV1 *networkingv1.NetworkingV1Client @@ -142,6 +150,11 @@ type Clientset struct { storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } +// AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client +func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface { + return c.admissionregistrationV1 +} + // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { return c.admissionregistrationV1beta1 @@ -237,6 +250,11 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } +// DiscoveryV1alpha1 retrieves the DiscoveryV1alpha1Client +func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface { + return c.discoveryV1alpha1 +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 @@ -331,13 +349,22 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset var err error + cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -414,6 +441,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.discoveryV1alpha1, err = discoveryv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -494,6 +525,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. 
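NewForConfig above now rejects a config that sets QPS without a positive Burst when no custom RateLimiter is supplied. A short sketch of a config that satisfies the new check; the API server address is a hypothetical placeholder.

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg := &rest.Config{
		Host: "https://127.0.0.1:6443", // hypothetical API server address
		QPS:  20,
		// Burst must be > 0 whenever QPS is set and no RateLimiter is provided,
		// otherwise NewForConfig returns an error instead of building the clientset.
		Burst: 40,
	}

	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		fmt.Println("failed to build clientset:", err)
		return
	}
	fmt.Printf("clientset ready: %T\n", client)
}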
func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset + cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) @@ -513,6 +545,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) @@ -538,6 +571,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. func New(c rest.Interface) *Clientset { var cs Clientset + cs.admissionregistrationV1 = admissionregistrationv1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) @@ -557,6 +591,7 @@ func New(c rest.Interface) *Clientset { cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 8346d26a..c1a2c519 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -19,6 +19,7 @@ limitations under the License. package scheme import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -38,6 +39,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" @@ -66,6 +68,7 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, @@ -85,6 +88,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, + discoveryv1alpha1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go new file mode 100644 index 00000000..75127315 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1Client { + return &AdmissionregistrationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go new file mode 100644 index 00000000..3af5d054 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go new file mode 100644 index 00000000..a5b062e3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..1f5e5e38 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
+type MutatingWebhookConfigurationInterface interface { + Create(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Update(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
+func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..7987b6e3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
+type ValidatingWebhookConfigurationInterface interface { + Create(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Update(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. 
Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index b13ea795..2d93ff02 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -76,7 +75,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index da19c759..621c734a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -91,7 +90,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 2c9db886..e5dd64d9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -81,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 99d677f4..7ca4e0b2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -20,7 +20,6 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -91,7 +90,7 @@ func setConfigDefaults(config *rest.Config) error { gv := 
v1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go index f007b05e..ec63179e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/auditregistration/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index 3bdcee59..de8864e2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/authentication/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 7f3334a0..816bd0a2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authentication/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go 
index e84b9008..2cc22632 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/authorization/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 7f236f6e..88eac75b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authorization/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index 2bd49e2d..4f3e96ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/autoscaling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index 3a49b26b..c1a91fc3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -20,7 +20,6 @@ package v2beta1 import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = 
serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go index 03fe25e4..bd2b3927 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -20,7 +20,6 @@ package v2beta2 import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index d5e35e6b..8dfc118a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/batch/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index aa71ca83..25708535 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/batch/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go index e6c6306b..d45c19d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -20,7 +20,6 @@ package v2alpha1 import ( v2alpha1 "k8s.io/api/batch/v2alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" 
"k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index baac42ee..1c52d551 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/certificates/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go index 9b566f31..0df7b71b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/coordination/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go index 91a76484..d68ed5d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/coordination/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go 
b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 044a28eb..428d2afa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/core/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -146,7 +145,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/api" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 8d6b6e87..feacd307 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -46,6 +46,9 @@ type PodInterface interface { List(opts metav1.ListOptions) (*v1.PodList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + GetEphemeralContainers(podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error) + UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (*v1.EphemeralContainers, error) + PodExpansion } @@ -189,3 +192,31 @@ func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources Into(result) return } + +// GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any. +func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. +func (c *pods) UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + Body(ephemeralContainers). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go new file mode 100644 index 00000000..e65a0988 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
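A hedged sketch of the new ephemeralcontainers subresource helpers added to the core/v1 pod client above. The namespace, pod name, and debug image are assumptions for the example; the subresource is only served by clusters running with the EphemeralContainers feature gate enabled.

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// attachDebugContainer appends a throwaway debug container to a running pod
// through the new Get/UpdateEphemeralContainers helpers.
func attachDebugContainer(cs *kubernetes.Clientset, namespace, podName string) error {
	ec, err := cs.CoreV1().Pods(namespace).GetEphemeralContainers(podName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	ec.EphemeralContainers = append(ec.EphemeralContainers, corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:    "debugger",
			Image:   "busybox",
			Command: []string{"sh"},
		},
	})
	if _, err := cs.CoreV1().Pods(namespace).UpdateEphemeralContainers(podName, ec); err != nil {
		return err
	}
	fmt.Printf("attached ephemeral container to %s/%s\n", namespace, podName)
	return nil
}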
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1alpha1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1alpha1Client is used to interact with features provided by the discovery.k8s.io group. +type DiscoveryV1alpha1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1alpha1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*DiscoveryV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &DiscoveryV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *DiscoveryV1alpha1Client { + return &DiscoveryV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go new file mode 100644 index 00000000..df51baa4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
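A hedged sketch of listing discovery.k8s.io/v1alpha1 EndpointSlices through the DiscoveryV1alpha1 getter introduced above. The namespace and the service-name label selector are assumptions; the resource is alpha in this release and must be enabled on the cluster.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printEndpointSlices lists the EndpointSlices backing an assumed Service.
func printEndpointSlices(cs *kubernetes.Clientset) error {
	slices, err := cs.DiscoveryV1alpha1().EndpointSlices("default").List(metav1.ListOptions{
		LabelSelector: "kubernetes.io/service-name=my-service",
	})
	if err != nil {
		return err
	}
	for _, s := range slices.Items {
		fmt.Printf("%s: %d endpoints\n", s.Name, len(s.Endpoints))
	}
	return nil
}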
+package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go new file mode 100644 index 00000000..41751476 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. +type EndpointSliceInterface interface { + Create(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Update(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.EndpointSlice, error) + List(opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1alpha1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..e8ceb59a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type EndpointSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go new file mode 100644 index 00000000..312ee428 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/types" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +// TODO: Add querying functions to the event expansion +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create + // except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) + // UpdateWithEventNamespace is the same as a Update + // except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) + // PatchWithEventNamespace is the same as an Update + // except that it sends the request to the event.Namespace. + PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) +} + +// CreateWithEventNamespace makes a new event. +// Returns the copy of the event the server returns, or an error. +// The namespace to create the event within is deduced from the event. +// it must either match this event client's namespace, or this event client must +// have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. +// It returns the copy of the event that the server returns, or an error. +// The namespace and key to update the event within is deduced from the event. +// The namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. +// Update also requires the ResourceVersion to be set in the event object. +func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Put(). 
+ NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// PatchWithEventNamespace modifies an existing event. +// It returns the copy of the event that the server returns, or an error. +// The namespace and name of the target event is deduced from the event. +// The namespace must either match this event client's namespace, or this event client must +// have been created with the "" namespace. +func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Patch(types.StrategicMergePatchType). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(data). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index fb59635b..e372ccff 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/events/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go index e27f693f..f6df7696 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -17,5 +17,3 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
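A hedged sketch of the CreateWithEventNamespace expansion shown above for the events.k8s.io/v1beta1 client: the request is routed to the namespace recorded on the Event object itself, so the typed client can be created with the "" namespace. All field values below are assumptions for illustration.

package example

import (
	corev1 "k8s.io/api/core/v1"
	eventsv1beta1 "k8s.io/api/events/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// emitEvent posts an Event into the namespace carried by the object itself.
func emitEvent(cs *kubernetes.Clientset) error {
	ev := &eventsv1beta1.Event{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "frameworkcontroller-",
			Namespace:    "default",
		},
		EventTime:           metav1.NowMicro(),
		ReportingController: "frameworkcontroller",
		ReportingInstance:   "frameworkcontroller-0",
		Action:              "Reconcile",
		Reason:              "Example",
		Type:                corev1.EventTypeNormal,
		Regarding: corev1.ObjectReference{
			Kind:      "Pod",
			Namespace: "default",
			Name:      "example-pod",
		},
	}
	_, err := cs.EventsV1beta1().Events("").CreateWithEventNamespace(ev)
	return err
}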
package v1beta1 - -type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 0e9edf5c..e3b22aa4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +29,7 @@ type ExtensionsV1beta1Interface interface { DaemonSetsGetter DeploymentsGetter IngressesGetter + NetworkPoliciesGetter PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -51,6 +51,10 @@ func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } +func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { return newPodSecurityPolicies(c) } @@ -91,7 +95,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index cfaeebd0..41d28f04 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -22,6 +22,8 @@ type DaemonSetExpansion interface{} type IngressExpansion interface{} +type NetworkPolicyExpansion interface{} + type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go new file mode 100644 index 00000000..0607e2dd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. 
+// A group's client should implement this interface. +type NetworkPoliciesGetter interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface has methods to work with NetworkPolicy resources. +type NetworkPolicyInterface interface { + Create(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) + Update(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error) + List(opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) + NetworkPolicyExpansion +} + +// networkPolicies implements NetworkPolicyInterface +type networkPolicies struct { + client rest.Interface + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { + return &networkPolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.NetworkPolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("networkpolicies"). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("networkpolicies"). 
+ Name(networkPolicy.Name). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. +func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("networkpolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index 8684db45..5315d9b9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 541bf6a9..ee523f8e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/networking/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go index 863f2d4d..e7acc27e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ 
b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/node/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go index 52683e20..b38d9aca 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/node/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 020e185e..8b8b22c6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -81,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index e3855bb9..1bc0179c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go 
b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index de83531e..efbbc68b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 46718d73..4db94cfa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go index bd7e1e54..5028bac8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/scheduling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 375f41b8..83bc0b8a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = 
serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index 6feec4ae..373f5cca 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/scheduling/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go index c2a03b96..8d3a8d8e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/settings/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 92378cf7..1afbe93c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -76,7 +75,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index c52f630a..32d50306 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - serializer 
"k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index e9916bc0..5e12b025 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go new file mode 100644 index 00000000..e121ae41 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to +// MutatingWebhookConfigurationLister. +type MutatingWebhookConfigurationListerExpansion interface{} + +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to +// ValidatingWebhookConfigurationLister. +type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..e2b5da09 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. +type MutatingWebhookConfigurationLister interface { + // List lists all MutatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) + // Get retrieves the MutatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1.MutatingWebhookConfiguration, error) + MutatingWebhookConfigurationListerExpansion +} + +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. +type mutatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all MutatingWebhookConfigurations in the indexer. +func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.MutatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the MutatingWebhookConfiguration from the index for a given name. +func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1.MutatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("mutatingwebhookconfiguration"), name) + } + return obj.(*v1.MutatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..33d55e08 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationLister interface { + // List lists all ValidatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) + // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1.ValidatingWebhookConfiguration, error) + ValidatingWebhookConfigurationListerExpansion +} + +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. +type validatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all ValidatingWebhookConfigurations in the indexer. +func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ValidatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. +func (s *validatingWebhookConfigurationLister) Get(name string) (*v1.ValidatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("validatingwebhookconfiguration"), name) + } + return obj.(*v1.ValidatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go new file mode 100644 index 00000000..706beecf --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointSliceLister helps list EndpointSlices. +type EndpointSliceLister interface { + // List lists all EndpointSlices in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) + // EndpointSlices returns an object that can list and get EndpointSlices. + EndpointSlices(namespace string) EndpointSliceNamespaceLister + EndpointSliceListerExpansion +} + +// endpointSliceLister implements the EndpointSliceLister interface. 
+type endpointSliceLister struct { + indexer cache.Indexer +} + +// NewEndpointSliceLister returns a new EndpointSliceLister. +func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { + return &endpointSliceLister{indexer: indexer} +} + +// List lists all EndpointSlices in the indexer. +func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.EndpointSlice)) + }) + return ret, err +} + +// EndpointSlices returns an object that can list and get EndpointSlices. +func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { + return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointSliceNamespaceLister helps list and get EndpointSlices. +type EndpointSliceNamespaceLister interface { + // List lists all EndpointSlices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) + // Get retrieves the EndpointSlice from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.EndpointSlice, error) + EndpointSliceNamespaceListerExpansion +} + +// endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister +// interface. +type endpointSliceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all EndpointSlices in the indexer for a given namespace. +func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.EndpointSlice)) + }) + return ret, err +} + +// Get retrieves the EndpointSlice from the indexer for a given namespace and name. +func (s endpointSliceNamespaceLister) Get(name string) (*v1alpha1.EndpointSlice, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("endpointslice"), name) + } + return obj.(*v1alpha1.EndpointSlice), nil +} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..d47af59a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// EndpointSliceListerExpansion allows custom methods to be added to +// EndpointSliceLister. +type EndpointSliceListerExpansion interface{} + +// EndpointSliceNamespaceListerExpansion allows custom methods to be added to +// EndpointSliceNamespaceLister. 
+type EndpointSliceNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index d5c2a7a7..6d55ae9b 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -26,6 +26,14 @@ type IngressListerExpansion interface{} // IngressNamespaceLister. type IngressNamespaceListerExpansion interface{} +// NetworkPolicyListerExpansion allows custom methods to be added to +// NetworkPolicyLister. +type NetworkPolicyListerExpansion interface{} + +// NetworkPolicyNamespaceListerExpansion allows custom methods to be added to +// NetworkPolicyNamespaceLister. +type NetworkPolicyNamespaceListerExpansion interface{} + // PodSecurityPolicyListerExpansion allows custom methods to be added to // PodSecurityPolicyLister. type PodSecurityPolicyListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go new file mode 100644 index 00000000..782f521a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NetworkPolicyLister helps list NetworkPolicies. +type NetworkPolicyLister interface { + // List lists all NetworkPolicies in the indexer. + List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + // NetworkPolicies returns an object that can list and get NetworkPolicies. + NetworkPolicies(namespace string) NetworkPolicyNamespaceLister + NetworkPolicyListerExpansion +} + +// networkPolicyLister implements the NetworkPolicyLister interface. +type networkPolicyLister struct { + indexer cache.Indexer +} + +// NewNetworkPolicyLister returns a new NetworkPolicyLister. +func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { + return &networkPolicyLister{indexer: indexer} +} + +// List lists all NetworkPolicies in the indexer. +func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.NetworkPolicy)) + }) + return ret, err +} + +// NetworkPolicies returns an object that can list and get NetworkPolicies. +func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { + return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// NetworkPolicyNamespaceLister helps list and get NetworkPolicies. 
+type NetworkPolicyNamespaceLister interface { + // List lists all NetworkPolicies in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. + Get(name string) (*v1beta1.NetworkPolicy, error) + NetworkPolicyNamespaceListerExpansion +} + +// networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister +// interface. +type networkPolicyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all NetworkPolicies in the indexer for a given namespace. +func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.NetworkPolicy)) + }) + return ret, err +} + +// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. +func (s networkPolicyNamespaceLister) Get(name string) (*v1beta1.NetworkPolicy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("networkpolicy"), name) + } + return obj.(*v1beta1.NetworkPolicy), nil +} diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 3f6b9bc2..fb81fb7b 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -94,6 +94,10 @@ type Config struct { // UserAgent is an optional field that specifies the caller of this request. UserAgent string + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may not // be specified with the TLS client certificate options. Use WrapTransport // to provide additional per-server middleware behavior. @@ -207,6 +211,12 @@ type TLSClientConfig struct { // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte + + // NextProtos is a list of supported application level protocols, in order of preference. + // Used to populate tls.Config.NextProtos. + // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string } var _ fmt.Stringer = TLSClientConfig{} @@ -232,6 +242,7 @@ func (c TLSClientConfig) String() string { CertData: c.CertData, KeyData: c.KeyData, CAData: c.CAData, + NextProtos: c.NextProtos, } // Explicitly mark non-empty credential fields as redacted. 
if len(cc.CertData) != 0 { @@ -487,7 +498,7 @@ func AddUserAgent(config *Config, userAgent string) *Config { return config } -// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (WrapTransport, Transport) removed func AnonymousClientConfig(config *Config) *Config { // copy only known safe fields return &Config{ @@ -499,15 +510,15 @@ func AnonymousClientConfig(config *Config) *Config { ServerName: config.ServerName, CAFile: config.TLSClientConfig.CAFile, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - RateLimiter: config.RateLimiter, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - Timeout: config.Timeout, - Dial: config.Dial, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + Dial: config.Dial, } } @@ -538,14 +549,16 @@ func CopyConfig(config *Config) *Config { CertData: config.TLSClientConfig.CertData, KeyData: config.TLSClientConfig.KeyData, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - RateLimiter: config.RateLimiter, - Timeout: config.Timeout, - Dial: config.Dial, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + RateLimiter: config.RateLimiter, + Timeout: config.Timeout, + Dial: config.Dial, } } diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index 83ef5ae3..0bc2d03f 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -55,7 +55,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { pluginsLock.Lock() defer pluginsLock.Unlock() if _, found := plugins[name]; found { - return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + return fmt.Errorf("auth Provider Plugin %q was registered twice", name) } klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin @@ -67,7 +67,7 @@ func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig defer pluginsLock.Unlock() p, ok := plugins[apc.Name] if !ok { - return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + return nil, fmt.Errorf("no Auth Provider found for name %q", apc.Name) } return p(clusterAddress, apc.Config, persister) } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index dd063038..556ca66e 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -521,14 +521,24 @@ func (r Request) finalURLTemplate() url.URL { return *url } -func (r *Request) tryThrottle() { +func (r *Request) tryThrottle() error { + if r.throttle == nil { + return nil + } + now := time.Now() - if r.throttle != nil { + var err error + if r.ctx != nil { + err = r.throttle.Wait(r.ctx) + } else { r.throttle.Accept() } + if latency := time.Since(now); 
latency > longThrottleLatency { klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } + + return err } // Watch attempts to begin watching the requested location. @@ -582,7 +592,7 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) if err != nil { // The watch stream mechanism handles many common partial data errors, so closed // connections can be retried in many cases. - if net.IsProbableEOF(err) { + if net.IsProbableEOF(err) || net.IsTimeout(err) { return watch.NewEmptyWatch(), nil } return nil, err @@ -592,10 +602,15 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) if result := r.transformResponse(resp, req); result.err != nil { return nil, result.err } - return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) + return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) } wrapperDecoder := wrapperDecoderFn(resp.Body) - return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil + return watch.NewStreamWatcher( + restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder), + // use 500 to indicate that the cause of the error is unknown - other error codes + // are more specific to HTTP interactions, and set a reason + errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"), + ), nil } // updateURLMetrics is a convenience function for pushing metrics. @@ -625,13 +640,18 @@ func (r *Request) Stream() (io.ReadCloser, error) { return nil, r.err } - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } url := r.URL().String() req, err := http.NewRequest(r.verb, url, nil) if err != nil { return nil, err } + if r.body != nil { + req.Body = ioutil.NopCloser(r.body) + } if r.ctx != nil { req = req.WithContext(r.ctx) } @@ -727,7 +747,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // We are retrying the request that we already send to apiserver // at least once before. // This request should also be throttled with the client-internal throttler. - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return err + } } resp, err := client.Do(req) updateURLMetrics(r, resp, err) @@ -798,7 +820,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // * http.Client.Do errors are returned directly. func (r *Request) Do() Result { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return Result{err: err} + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -812,7 +836,9 @@ func (r *Request) Do() Result { // DoRaw executes the request but does not process the response body. func (r *Request) DoRaw() ([]byte, error) { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -845,13 +871,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) - streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. 
Please retry.", err) + streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err) return Result{ err: streamErr, } default: - klog.Errorf("Unexpected error when reading response body: %#v", err) - unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) + klog.Errorf("Unexpected error when reading response body: %v", err) + unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err) return Result{ err: unexpectedErr, } diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index bd5749dc..0800e4ec 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -61,9 +61,10 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // TransportConfig converts a client config to an appropriate transport config. func (c *Config) TransportConfig() (*transport.Config, error) { conf := &transport.Config{ - UserAgent: c.UserAgent, - Transport: c.Transport, - WrapTransport: c.WrapTransport, + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: c.WrapTransport, + DisableCompression: c.DisableCompression, TLS: transport.TLSConfig{ Insecure: c.Insecure, ServerName: c.ServerName, @@ -73,10 +74,12 @@ func (c *Config) TransportConfig() (*transport.Config, error) { CertData: c.CertData, KeyFile: c.KeyFile, KeyData: c.KeyData, + NextProtos: c.NextProtos, }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + BearerTokenFile: c.BearerTokenFile, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, Groups: c.Impersonate.Groups, diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index 73bb63ad..e95c020b 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -54,7 +54,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, fmt.Errorf("unable to decode to metav1.Event") } switch got.Type { - case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): default: return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) } diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index c1ab45f3..da4a1624 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -38,6 +38,11 @@ func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { *out = make([]byte, len(*in)) copy(*out, *in) } + if in.NextProtos != nil { + in, out := &in.NextProtos, &out.NextProtos + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index e6db578e..f56b34ee 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -439,8 +439,8 @@ func (a ActionImpl) GetSubresource() string { return a.Subresource } func (a ActionImpl) Matches(verb, resource string) bool { - return strings.ToLower(verb) == 
strings.ToLower(a.Verb) && - strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(resource, a.Resource.Resource) } func (a ActionImpl) DeepCopy() Action { ret := a diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index 993fcf6a..98f82326 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -18,9 +18,11 @@ package testing import ( "fmt" + "reflect" "sync" jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -139,6 +141,11 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { return true, nil, err } + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + switch action.GetPatchType() { case types.JSONPatchType: patch, err := jsonpatch.DecodePatch(action.GetPatch()) @@ -149,6 +156,7 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { if err != nil { return true, nil, err } + if err = json.Unmarshal(modified, obj); err != nil { return true, nil, err } @@ -310,6 +318,11 @@ func (t *tracker) Add(obj runtime.Object) error { if err != nil { return err } + + if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { + gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} + } + if len(gvks) == 0 { return fmt.Errorf("no registered kinds for %v", obj) } diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go index 20339ab9..c3417267 100644 --- a/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -105,7 +105,7 @@ func LoadFromFile(path string) (*Info, error) { // The fields of client.Config with a corresponding field in the Info are set // with the value from the Info. func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) { - var config restclient.Config = c + var config = c config.Username = info.User config.Password = info.Password config.CAFile = info.CAFile @@ -118,6 +118,7 @@ func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) return config, nil } +// Complete returns true if the Kubernetes API authorization info is complete. func (info Info) Complete() bool { return len(info.User) > 0 || len(info.CertFile) > 0 || diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index b5d39252..27a1c52c 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -79,6 +79,7 @@ type controller struct { clock clock.Clock } +// Controller is a generic controller framework. 
type Controller interface { Run(stopCh <-chan struct{}) HasSynced() bool @@ -130,6 +131,8 @@ func (c *controller) HasSynced() bool { } func (c *controller) LastSyncResourceVersion() string { + c.reflectorMutex.RLock() + defer c.reflectorMutex.RUnlock() if c.reflector == nil { return "" } @@ -149,7 +152,7 @@ func (c *controller) processLoop() { for { obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) if err != nil { - if err == FIFOClosedError { + if err == ErrFIFOClosed { return } if c.config.RetryOnError { diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index f24eec25..b832b4bb 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -160,7 +160,7 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { return f.keyFunc(obj) } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() @@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta { return b } -// willObjectBeDeletedLocked returns true only if the last delta for the -// given object is Delete. Caller must lock first. -func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool { - deltas := f.items[id] - return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted -} - // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { @@ -310,13 +303,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err return KeyError{obj, err} } - // If object is supposed to be deleted (last event is Deleted), - // then we should ignore Sync events, because it would result in - // recreation of this object. - if actionType == Sync && f.willObjectBeDeletedLocked(id) { - return nil - } - newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) @@ -389,7 +375,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err return d, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *DeltaFIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() @@ -417,7 +403,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() @@ -593,6 +579,7 @@ type KeyGetter interface { // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string +// Change type definition const ( Added DeltaType = "Added" Updated DeltaType = "Updated" diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index 68d41c8e..14ad492e 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -48,14 +48,14 @@ type ExpirationCache struct { // ExpirationPolicy dictates when an object expires. Currently only abstracted out // so unittests don't rely on the system clock. 
type ExpirationPolicy interface { - IsExpired(obj *timestampedEntry) bool + IsExpired(obj *TimestampedEntry) bool } // TTLPolicy implements a ttl based ExpirationPolicy. type TTLPolicy struct { // >0: Expire entries with an age > ttl // <=0: Don't expire any entry - Ttl time.Duration + TTL time.Duration // Clock used to calculate ttl expiration Clock clock.Clock @@ -63,26 +63,30 @@ type TTLPolicy struct { // IsExpired returns true if the given object is older than the ttl, or it can't // determine its age. -func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool { - return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl +func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { + return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL } -// timestampedEntry is the only type allowed in a ExpirationCache. -type timestampedEntry struct { - obj interface{} - timestamp time.Time +// TimestampedEntry is the only type allowed in a ExpirationCache. +// Keep in mind that it is not safe to share timestamps between computers. +// Behavior may be inconsistent if you get a timestamp from the API Server and +// use it on the client machine as part of your ExpirationCache. +type TimestampedEntry struct { + Obj interface{} + Timestamp time.Time + key string } -// getTimestampedEntry returns the timestampedEntry stored under the given key. -func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) { +// getTimestampedEntry returns the TimestampedEntry stored under the given key. +func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { item, _ := c.cacheStorage.Get(key) - if tsEntry, ok := item.(*timestampedEntry); ok { + if tsEntry, ok := item.(*TimestampedEntry); ok { return tsEntry, true } return nil, false } -// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't +// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't // already expired. It holds a write lock across deletion. func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { // Prevent all inserts from the time we deem an item as "expired" to when we @@ -95,11 +99,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) c.cacheStorage.Delete(key) return nil, false } - return timestampedItem.obj, true + return timestampedItem.Obj, true } // GetByKey returns the item stored under the key, or sets exists=false. 
@@ -126,10 +130,8 @@ func (c *ExpirationCache) List() []interface{} { list := make([]interface{}, 0, len(items)) for _, item := range items { - obj := item.(*timestampedEntry).obj - if key, err := c.keyFunc(obj); err != nil { - list = append(list, obj) - } else if obj, exists := c.getOrExpire(key); exists { + key := item.(*TimestampedEntry).key + if obj, exists := c.getOrExpire(key); exists { list = append(list, obj) } } @@ -151,7 +153,7 @@ func (c *ExpirationCache) Add(obj interface{}) error { c.expirationLock.Lock() defer c.expirationLock.Unlock() - c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()}) + c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) return nil } @@ -184,7 +186,7 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er if err != nil { return KeyError{item, err} } - items[key] = &timestampedEntry{item, ts} + items[key] = &TimestampedEntry{item, ts, key} } c.expirationLock.Lock() defer c.expirationLock.Unlock() @@ -199,10 +201,15 @@ func (c *ExpirationCache) Resync() error { return nil } // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { + return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}}) +} + +// NewExpirationStore creates and returns a ExpirationCache for a given policy +func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store { return &ExpirationCache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, clock: clock.RealClock{}, - expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}}, + expirationPolicy: expirationPolicy, } } diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index a096765f..33afd32c 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -33,16 +33,19 @@ func (c *fakeThreadSafeMap) Delete(key string) { } } +// FakeExpirationPolicy keeps the list for keys which never expires. type FakeExpirationPolicy struct { NeverExpire sets.String RetrieveKeyFunc KeyFunc } -func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { +// IsExpired used to check if object is expired. +func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool { key, _ := p.RetrieveKeyFunc(obj) return !p.NeverExpire.Has(key) } +// NewFakeExpirationStore creates a new instance for the ExpirationCache. func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store { cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) return &ExpirationCache{ diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go index b59e2eb2..462d2266 100644 --- a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -16,7 +16,7 @@ limitations under the License. package cache -// FakeStore lets you define custom functions for store operations +// FakeCustomStore lets you define custom functions for store operations.
type FakeCustomStore struct { AddFunc func(obj interface{}) error UpdateFunc func(obj interface{}) error @@ -25,7 +25,7 @@ type FakeCustomStore struct { ListKeysFunc func() []string GetFunc func(obj interface{}) (item interface{}, exists bool, err error) GetByKeyFunc func(key string) (item interface{}, exists bool, err error) - ReplaceFunc func(list []interface{}, resourceVerion string) error + ReplaceFunc func(list []interface{}, resourceVersion string) error ResyncFunc func() error } diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 508c5530..7a3bc3d3 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -34,7 +34,8 @@ type ErrRequeue struct { Err error } -var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue") +// ErrFIFOClosed used when FIFO is closed +var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue") func (e ErrRequeue) Error() string { if e.Err == nil { @@ -66,7 +67,7 @@ type Queue interface { Close() } -// Helper function for popping from Queue. +// Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. func Pop(queue Queue) interface{} { @@ -126,7 +127,7 @@ func (f *FIFO) Close() { f.cond.Broadcast() } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *FIFO) HasSynced() bool { f.lock.Lock() @@ -242,7 +243,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { return item, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *FIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() @@ -267,7 +268,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go index 7357ff97..e503a45a 100644 --- a/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -28,7 +28,9 @@ const ( closedMsg = "heap is closed" ) +// LessFunc is used to compare two objects in the heap. type LessFunc func(interface{}, interface{}) bool + type heapItem struct { obj interface{} // The object which is stored in the heap. index int // The index of the object's key in the Heap.queue. @@ -158,7 +160,7 @@ func (h *Heap) Add(obj interface{}) error { return nil } -// Adds all the items in the list to the queue and then signals the condition +// BulkAdd adds all the items in the list to the queue and then signals the condition // variable. It is useful when the caller would like to add all of the items // to the queue before consumer starts processing them. 
func (h *Heap) BulkAdd(list []interface{}) error { @@ -249,11 +251,11 @@ func (h *Heap) Pop() (interface{}, error) { h.cond.Wait() } obj := heap.Pop(h.data) - if obj != nil { - return obj, nil - } else { + if obj == nil { return nil, fmt.Errorf("object was removed from heap data") } + + return obj, nil } // List returns a list of all the items. diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index 15acb168..9fa40683 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -23,17 +23,27 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// Indexer is a storage interface that lets you list objects using multiple indexing functions +// Indexer is a storage interface that lets you list objects using multiple indexing functions. +// There are three kinds of strings here. +// One is a storage key, as defined in the Store interface. +// Another kind is a name of an index. +// The third kind of string is an "indexed value", which is produced by an +// IndexFunc and can be a field value or any other string computed from the object. type Indexer interface { Store - // Retrieve list of objects that match on the named indexing function + // Index returns the stored objects whose set of indexed values + // intersects the set of indexed values of the given object, for + // the named index Index(indexName string, obj interface{}) ([]interface{}, error) - // IndexKeys returns the set of keys that match on the named indexing function. - IndexKeys(indexName, indexKey string) ([]string, error) - // ListIndexFuncValues returns the list of generated values of an Index func + // IndexKeys returns the storage keys of the stored objects whose + // set of indexed values for the named index includes the given + // indexed value + IndexKeys(indexName, indexedValue string) ([]string, error) + // ListIndexFuncValues returns all the indexed values of the given index ListIndexFuncValues(indexName string) []string - // ByIndex lists object that match on the named indexing function with the exact key - ByIndex(indexName, indexKey string) ([]interface{}, error) + // ByIndex returns the stored objects whose set of indexed values + // for the named index includes the given indexed value + ByIndex(indexName, indexedValue string) ([]interface{}, error) // GetIndexer return the indexers GetIndexers() Indexers @@ -42,7 +52,7 @@ type Indexer interface { AddIndexers(newIndexers Indexers) error } -// IndexFunc knows how to provide an indexed value for an object. +// IndexFunc knows how to compute the set of indexed values for an object. type IndexFunc func(obj interface{}) ([]string, error) // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns @@ -65,6 +75,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { } const ( + // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field. NamespaceIndex string = "namespace" ) diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 311ff8c4..d649cd73 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -30,6 +30,7 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) +// ListAll calls appendFn with each value retrieved from store which matches the selector. 
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { @@ -50,6 +51,7 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } +// ListAllByNamespace used to list items belongs to namespace from Indexer. func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() if namespace == metav1.NamespaceAll { @@ -124,6 +126,7 @@ type GenericNamespaceLister interface { Get(name string) (runtime.Object, error) } +// NewGenericLister creates a new instance for the genericLister. func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { return &genericLister{indexer: indexer, resource: resource} } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go index 4c6686e9..5d3245a6 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -42,6 +42,7 @@ type MutationCache interface { Mutation(interface{}) } +// ResourceVersionComparator is able to compare object versions. type ResourceVersionComparator interface { CompareResourceVersion(lhs, rhs runtime.Object) int } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index adb5b8be..9903b292 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -36,12 +36,14 @@ func init() { mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) } -type CacheMutationDetector interface { +// MutationDetector is able to monitor if the object be modified outside. +type MutationDetector interface { AddObject(obj interface{}) Run(stopCh <-chan struct{}) } -func NewCacheMutationDetector(name string) CacheMutationDetector { +// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. +func NewCacheMutationDetector(name string) MutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } @@ -114,7 +116,7 @@ func (d *defaultCacheMutationDetector) CompareObjects() { altered := false for i, obj := range d.cachedObjs { if !reflect.DeepEqual(obj.cached, obj.copied) { - fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied)) + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) altered = true } } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index c43b7fc5..cdc9b014 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -17,16 +17,13 @@ limitations under the License. 
package cache import ( + "context" "errors" "fmt" "io" "math/rand" - "net" - "net/url" "reflect" - "strings" "sync" - "syscall" "time" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -35,9 +32,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/naming" + utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/pager" "k8s.io/klog" "k8s.io/utils/trace" ) @@ -68,6 +67,9 @@ type Reflector struct { lastSyncResourceVersion string // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // WatchListPageSize is the requested chunk size of initial and resync watch lists. + // Defaults to pager.PageSize. + WatchListPageSize int64 } var ( @@ -79,7 +81,7 @@ var ( // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { - indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc}) + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) return indexer, reflector } @@ -108,11 +110,6 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } -func makeValidPrometheusMetricLabel(in string) string { - // this isn't perfect, but it removes our common characters - return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) -} - // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors var internalPackages = []string{"client-go/tools/cache/"} @@ -167,7 +164,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { options := metav1.ListOptions{ResourceVersion: "0"} if err := func() error { - initTrace := trace.New("Reflector " + r.name + " ListAndWatch") + initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name}) defer initTrace.LogIfLong(10 * time.Second) var list runtime.Object var err error @@ -179,7 +176,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { panicCh <- r } }() - list, err = r.listerWatcher.List(options) + // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first + // list request will return the full response. + pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { + return r.listerWatcher.List(opts) + })) + if r.WatchListPageSize != 0 { + pager.PageSize = r.WatchListPageSize + } + // Pager falls back to full list if paginated list calls fail due to an "Expired" error. + list, err = pager.List(context.Background(), options) close(listCh) }() select { @@ -257,8 +263,14 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // We want to avoid situations of hanging watchers. Stop any wachers that do not // receive any events within the timeout window. TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. 
+ // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). + AllowWatchBookmarks: true, } + // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent + start := r.clock.Now() w, err := r.listerWatcher.Watch(options) if err != nil { switch err { @@ -273,20 +285,21 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // It doesn't make sense to re-list all objects because most likely we will be able to restart // watch where we ended. // If that's the case wait and resend watch request. - if urlError, ok := err.(*url.Error); ok { - if opError, ok := urlError.Err.(*net.OpError); ok { - if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { - time.Sleep(time.Second) - continue - } - } + if utilnet.IsConnectionRefused(err) { + time.Sleep(time.Second) + continue } return nil } - if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { + if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + switch { + case apierrs.IsResourceExpired(err): + klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + default: + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + } } return nil } @@ -303,8 +316,7 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err } // watchHandler watches w and keeps *resourceVersion up to date. -func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { - start := r.clock.Now() +func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { eventCount := 0 // Stopping the watcher should be idempotent and if we return from this function there's no way @@ -354,6 +366,8 @@ loop: if err != nil { utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) } + case watch.Bookmark: + // A `Bookmark` means watch has synced here, just update the resourceVersion default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) } @@ -363,7 +377,7 @@ loop: } } - watchDuration := r.clock.Now().Sub(start) + watchDuration := r.clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go index 0945e5c3..dd849c8f 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -94,23 +94,6 @@ var metricsFactory = struct { metricsProvider: noopMetricsProvider{}, } -func newReflectorMetrics(name string) *reflectorMetrics { - var ret *reflectorMetrics - if len(name) == 0 { - return ret - } - return &reflectorMetrics{ - numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name), - listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name), - numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name), - numberOfWatches: 
metricsFactory.metricsProvider.NewWatchesMetric(name), - numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name), - watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name), - numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name), - lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name), - } -} - // SetReflectorMetricsProvider sets the metrics provider func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { metricsFactory.setProviders.Do(func() { diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index b2f3dba0..f59a0852 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -31,31 +31,118 @@ import ( "k8s.io/klog" ) -// SharedInformer has a shared data cache and is capable of distributing notifications for changes -// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is -// one behavior change compared to a standard Informer. When you receive a notification, the cache -// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend -// on the contents of the cache exactly matching the notification you've received in handler -// functions. If there was a create, followed by a delete, the cache may NOT have your item. This -// has advantages over the broadcaster since it allows us to share a common cache across many -// controllers. Extending the broadcaster would have required us keep duplicate caches for each -// watch. +// SharedInformer provides eventually consistent linkage of its +// clients to the authoritative state of a given collection of +// objects. An object is identified by its API group, kind/resource, +// namespace, and name; the `ObjectMeta.UID` is not part of an +// object's ID as far as this contract is concerned. One +// SharedInformer provides linkage to objects of a particular API +// group and kind/resource. The linked object collection of a +// SharedInformer may be further restricted to one namespace and/or by +// label selector and/or field selector. +// +// The authoritative state of an object is what apiservers provide +// access to, and an object goes through a strict sequence of states. +// An object state is either "absent" or present with a +// ResourceVersion and other appropriate content. +// +// A SharedInformer gets object states from apiservers using a +// sequence of LIST and WATCH operations. Through this sequence the +// apiservers provide a sequence of "collection states" to the +// informer, where each collection state defines the state of every +// object of the collection. No promise --- beyond what is implied by +// other remarks here --- is made about how one informer's sequence of +// collection states relates to a different informer's sequence of +// collection states. +// +// A SharedInformer maintains a local cache, exposed by GetStore() and +// by GetIndexer() in the case of an indexed informer, of the state of +// each relevant object. This cache is eventually consistent with the +// authoritative state. 
This means that, unless prevented by +// persistent communication problems, if ever a particular object ID X +// is authoritatively associated with a state S then for every +// SharedInformer I whose collection includes (X, S) eventually either +// (1) I's cache associates X with S or a later state of X, (2) I is +// stopped, or (3) the authoritative state service for X terminates. +// To be formally complete, we say that the absent state meets any +// restriction by label selector or field selector. +// +// The local cache starts out empty, and gets populated and updated +// during `Run()`. +// +// As a simple example, if a collection of objects is henceforeth +// unchanging, a SharedInformer is created that links to that +// collection, and that SharedInformer is `Run()` then that +// SharedInformer's cache eventually holds an exact copy of that +// collection (unless it is stopped too soon, the authoritative state +// service ends, or communication problems between the two +// persistently thwart achievement). +// +// As another simple example, if the local cache ever holds a +// non-absent state for some object ID and the object is eventually +// removed from the authoritative state then eventually the object is +// removed from the local cache (unless the SharedInformer is stopped +// too soon, the authoritative state service ends, or communication +// problems persistently thwart the desired result). +// +// The keys in the Store are of the form namespace/name for namespaced +// objects, and are simply the name for non-namespaced objects. +// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for +// a given object, and `SplitMetaNamespaceKey(key)` to split a key +// into its constituent parts. +// +// A client is identified here by a ResourceEventHandler. For every +// update to the SharedInformer's local cache and for every client +// added before `Run()`, eventually either the SharedInformer is +// stopped or the client is notified of the update. A client added +// after `Run()` starts gets a startup batch of notifications of +// additions of the object existing in the cache at the time that +// client was added; also, for every update to the SharedInformer's +// local cache after that client was added, eventually either the +// SharedInformer is stopped or that client is notified of that +// update. Client notifications happen after the corresponding cache +// update and, in the case of a SharedIndexInformer, after the +// corresponding index updates. It is possible that additional cache +// and index updates happen before such a prescribed notification. +// For a given SharedInformer and client, the notifications are +// delivered sequentially. For a given SharedInformer, client, and +// object ID, the notifications are delivered in order. +// +// A client must process each notification promptly; a SharedInformer +// is not engineered to deal well with a large backlog of +// notifications to deliver. Lengthy processing should be passed off +// to something else, for example through a +// `client-go/util/workqueue`. +// +// Each query to an informer's local cache --- whether a single-object +// lookup, a list operation, or a use of one of its indices --- is +// answered entirely from one of the collection states received by +// that informer. +// +// A delete notification exposes the last locally known non-absent +// state, except that its ResourceVersion is replaced with a +// ResourceVersion in which the object is actually absent. 
type SharedInformer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. AddEventHandler(handler ResourceEventHandler) - // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the - // specified resync period. Events to a single handler are delivered sequentially, but there is - // no coordination between different handlers. + // AddEventHandlerWithResyncPeriod adds an event handler to the + // shared informer using the specified resync period. The resync + // operation consists of delivering to the handler a create + // notification for every object in the informer's local cache; it + // does not add any interactions with the authoritative storage. AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) - // GetStore returns the Store. + // GetStore returns the informer's local cache as a Store. GetStore() Store // GetController gives back a synthetic interface that "votes" to start the informer GetController() Controller - // Run starts the shared informer, which will be stopped when stopCh is closed. + // Run starts and runs the shared informer, returning after it stops. + // The informer will be stopped when stopCh is closed. Run(stopCh <-chan struct{}) - // HasSynced returns true if the shared informer's store has synced. + // HasSynced returns true if the shared informer's store has been + // informed by at least one full LIST of the authoritative state + // of the informer's object collection. This is unrelated to "resync". HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -63,6 +150,7 @@ type SharedInformer interface { LastSyncResourceVersion() string } +// SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { SharedInformer // AddIndexers add indexers to the informer before it starts. @@ -102,10 +190,26 @@ const ( initialBufferSize = 1024 ) +// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages +// indicating that the caller identified by name is waiting for syncs, followed by +// either a successful or failed sync. +func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !WaitForCacheSync(stopCh, cacheSyncs...) { + utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName)) + return false + } + + klog.Infof("Caches are synced for %s ", controllerName) + return true +} + // WaitForCacheSync waits for caches to populate. 
It returns true if it was successful, false // if the controller should shutdown +// callers should prefer WaitForNamedCacheSync() func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { - err := wait.PollUntil(syncedPollPeriod, + err := wait.PollImmediateUntil(syncedPollPeriod, func() (bool, error) { for _, syncFunc := range cacheSyncs { if !syncFunc() { @@ -129,7 +233,7 @@ type sharedIndexInformer struct { controller Controller processor *sharedProcessor - cacheMutationDetector CacheMutationDetector + cacheMutationDetector MutationDetector // This block is tracked to handle late initialization of the controller listerWatcher ListerWatcher @@ -169,7 +273,7 @@ func (v *dummyController) HasSynced() bool { return v.informer.HasSynced() } -func (c *dummyController) LastSyncResourceVersion() string { +func (v *dummyController) LastSyncResourceVersion() string { return "" } @@ -555,7 +659,7 @@ func (p *processorListener) run() { case deleteNotification: p.handler.OnDelete(notification.oldObj) default: - utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next)) } } // the only way to get here is if the p.nextCh is empty and closed diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 5cbdd17e..e7232514 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -185,7 +185,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro set := index[indexKey] list := make([]interface{}, 0, set.Len()) - for _, key := range set.List() { + for key := range set { list = append(list, c.items[key]) } @@ -292,6 +292,13 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { set := index[indexValue] if set != nil { set.Delete(key) + + // If we don't delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. + if len(set) == 0 { + delete(index, indexValue) + } } } } @@ -302,6 +309,7 @@ func (c *threadSafeMap) Resync() error { return nil } +// NewThreadSafeStore creates a new instance of ThreadSafeStore. func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ items: map[string]interface{}{}, diff --git a/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go index 117df46c..220845dd 100644 --- a/vendor/k8s.io/client-go/tools/cache/undelta_store.go +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -31,6 +31,7 @@ type UndeltaStore struct { // Assert that it implements the Store interface. var _ Store = &UndeltaStore{} +// Add inserts an object into the store and sends complete state by calling PushFunc. // Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. // In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} // and the List. So, the following can happen, resulting in two identical calls to PushFunc. 
@@ -41,7 +42,6 @@ var _ Store = &UndeltaStore{} // 3 Store.Add(b) // 4 Store.List() -> [a,b] // 5 Store.List() -> [a,b] - func (u *UndeltaStore) Add(obj interface{}) error { if err := u.Store.Add(obj); err != nil { return err @@ -50,6 +50,7 @@ func (u *UndeltaStore) Add(obj interface{}) error { return nil } +// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc. func (u *UndeltaStore) Update(obj interface{}) error { if err := u.Store.Update(obj); err != nil { return err @@ -58,6 +59,7 @@ func (u *UndeltaStore) Update(obj interface{}) error { return nil } +// Delete removes an item from the cache and sends complete state by calling PushFunc. func (u *UndeltaStore) Delete(obj interface{}) error { if err := u.Store.Delete(obj); err != nil { return err @@ -66,6 +68,10 @@ func (u *UndeltaStore) Delete(obj interface{}) error { return nil } +// Replace will delete the contents of current store, using instead the given list. +// 'u' takes ownership of the list, you should not reference the list again +// after calling this function. +// The new contents complete state will be sent by calling PushFunc after replacement. func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { if err := u.Store.Replace(list, resourceVersion); err != nil { return err diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index a7b8c1c6..9c6ac3b5 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -228,6 +228,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) if err != nil { @@ -296,16 +297,6 @@ func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { return config } -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information -func makeServerIdentificationConfig(info clientauth.Info) restclient.Config { - config := restclient.Config{} - config.CAFile = info.CAFile - if info.Insecure != nil { - config.Insecure = *info.Insecure - } - return config -} - func canIdentifyUser(config restclient.Config) bool { return len(config.Username) > 0 || (len(config.CertFile) > 0 || len(config.CertData) > 0) || @@ -499,8 +490,9 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) if server := config.overrides.ClusterInfo.Server; len(server) > 0 { icc.Host = server } - if token := config.overrides.AuthInfo.Token; len(token) > 0 { - icc.BearerToken = token + if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 { + icc.BearerToken = config.overrides.AuthInfo.Token + icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile } if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { icc.TLSClientConfig.CAFile = certificateAuthorityFile diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index 7e928a91..4e37e792 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ 
b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -127,6 +127,10 @@ type ClientConfigLoadingRules struct { // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. // This should match the overrides passed in to ClientConfig loader. DefaultClientConfig ClientConfig + + // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. + // In case of missing files, it warns the user about the missing files. + WarnIfAllMissing bool } // ClientConfigLoadingRules implements the ClientConfigLoader interface. @@ -136,20 +140,23 @@ var _ ClientConfigLoader = &ClientConfigLoadingRules{} // use this constructor func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { chain := []string{} + warnIfAllMissing := false envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) if len(envVarFiles) != 0 { fileList := filepath.SplitList(envVarFiles) // prevent the same path load multiple times chain = append(chain, deduplicate(fileList)...) + warnIfAllMissing = true } else { chain = append(chain, RecommendedHomeFile) } return &ClientConfigLoadingRules{ - Precedence: chain, - MigrationRules: currentMigrationRules(), + Precedence: chain, + MigrationRules: currentMigrationRules(), + WarnIfAllMissing: warnIfAllMissing, } } @@ -172,6 +179,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } errlist := []error{} + missingList := []string{} kubeConfigFiles := []string{} @@ -195,18 +203,26 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } config, err := LoadFromFile(filename) + if os.IsNotExist(err) { // skip missing files + // Add to the missing list to produce a warning + missingList = append(missingList, filename) continue } + if err != nil { - errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err)) + errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err)) continue } kubeconfigs = append(kubeconfigs, config) } + if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { + klog.Warningf("Config not found: %s", strings.Join(missingList, ", ")) + } + // first merge all of our maps mapConfig := clientcmdapi.NewConfig() @@ -356,7 +372,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) { if err != nil { return nil, err } - klog.V(6).Infoln("Config loaded from file", filename) + klog.V(6).Infoln("Config loaded from file: ", filename) // set LocationOfOrigin on every Cluster, User, and Context for key, obj := range config.AuthInfos { @@ -467,7 +483,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error { } base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) } if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { @@ -480,7 +496,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error { } base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) } if err := 
ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go index 629c0b30..2f927072 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -185,9 +185,10 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [ } if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } else { + defer clientCertCA.Close() } } @@ -223,16 +224,18 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } else { + defer clientCertFile.Close() } } if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } else { + defer clientKeyFile.Close() } } } @@ -250,8 +253,6 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err for _, v := range authInfo.Exec.Env { if len(v.Name) == 0 { validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) - } else if len(v.Value) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("env variable %s value must be specified for %v to use exec authentication plugin", v.Name, authInfoName)) } } } diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go index 74ea3586..d265db78 100644 --- a/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -25,9 +25,11 @@ import ( metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) const defaultPageSize = 500 +const defaultPageBufferSize = 10 // ListPageFunc returns a list object for the given list options. 
type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) @@ -48,6 +50,9 @@ type ListPager struct { PageFn ListPageFunc FullListIfExpired bool + + // Number of pages to buffer + PageBufferSize int32 } // New creates a new pager from the provided pager function using the default @@ -58,6 +63,7 @@ func New(fn ListPageFunc) *ListPager { PageSize: defaultPageSize, PageFn: fn, FullListIfExpired: true, + PageBufferSize: defaultPageBufferSize, } } @@ -73,6 +79,12 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti } var list *metainternalversion.List for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + obj, err := p.PageFn(ctx, options) if err != nil { if !errors.IsResourceExpired(err) || !p.FullListIfExpired { @@ -115,3 +127,105 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti options.Continue = m.GetContinue() } } + +// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If +// fn returns an error, processing stops and that error is returned. If fn does not return an error, +// any error encountered while retrieving the list from the server is returned. If the context +// cancels or times out, the context error is returned. Since the list is retrieved in paginated +// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list +// requests exceed the expiration limit of the apiserver being called. +// +// Items are retrieved in chunks from the server to reduce the impact on the server with up to +// ListPager.PageBufferSize chunks buffered concurrently in the background. +func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItem(obj, fn) + }) +} + +// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on +// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does +// not return an error, any error encountered while retrieving the list from the server is +// returned. If the context cancels or times out, the context error is returned. Since the list is +// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if +// the pagination list requests exceed the expiration limit of the apiserver being called. +// +// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background. +func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if p.PageBufferSize < 0 { + return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize) + } + + // Ensure background goroutine is stopped if this call exits before all list items are + // processed. Cancelation error from this deferred cancel call is never returned to caller; + // either the list result has already been sent to bgResultC or the fn error is returned and + // the cancelation error is discarded. 
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() + + chunkC := make(chan runtime.Object, p.PageBufferSize) + bgResultC := make(chan error, 1) + go func() { + defer utilruntime.HandleCrash() + + var err error + defer func() { + close(chunkC) + bgResultC <- err + }() + err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error { + select { + case chunkC <- chunk: // buffer the chunk, this can block + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }() + + for o := range chunkC { + err := fn(o) + if err != nil { + return err // any fn error should be returned immediately + } + } + // promote the results of our background goroutine to the foreground + return <-bgResultC +} + +// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list +// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return +// an error, any error encountered while retrieving the list from the server is returned. If the +// context cancels or times out, the context error is returned. Since the list is retrieved in +// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the +// pagination list requests exceed the expiration limit of the apiserver being called. +func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if options.Limit == 0 { + options.Limit = p.PageSize + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + obj, err := p.PageFn(ctx, options) + if err != nil { + return err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return fmt.Errorf("returned object must be a list: %v", err) + } + if err := fn(obj); err != nil { + return err + } + // if we have no more items, return. + if len(m.GetContinue()) == 0 { + return nil + } + // set the next loop up + options.Continue = m.GetContinue() + } +} diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go index 573d948a..442a991c 100644 --- a/vendor/k8s.io/client-go/tools/reference/ref.go +++ b/vendor/k8s.io/client-go/tools/reference/ref.go @@ -19,8 +19,6 @@ package reference import ( "errors" "fmt" - "net/url" - "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -30,8 +28,7 @@ import ( var ( // Errors that could be returned by GetReference. 
- ErrNilObject = errors.New("can't reference a nil object") - ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") + ErrNilObject = errors.New("can't reference a nil object") ) // GetReference returns an ObjectReference which refers to the given @@ -47,20 +44,6 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReferen return ref, nil } - gvk := obj.GetObjectKind().GroupVersionKind() - - // if the object referenced is actually persisted, we can just get kind from meta - // if we are building an object reference to something not yet persisted, we should fallback to scheme - kind := gvk.Kind - if len(kind) == 0 { - // TODO: this is wrong - gvks, _, err := scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - kind = gvks[0].Kind - } - // An object that implements only List has enough metadata to build a reference var listMeta metav1.Common objectMeta, err := meta.Accessor(obj) @@ -73,29 +56,29 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReferen listMeta = objectMeta } - // if the object referenced is actually persisted, we can also get version from meta - version := gvk.GroupVersion().String() - if len(version) == 0 { - selfLink := listMeta.GetSelfLink() - if len(selfLink) == 0 { - return nil, ErrNoSelfLink - } - selfLinkUrl, err := url.Parse(selfLink) + gvk := obj.GetObjectKind().GroupVersionKind() + + // If object meta doesn't contain data about kind and/or version, + // we are falling back to scheme. + // + // TODO: This doesn't work for CRDs, which are not registered in scheme. + if gvk.Empty() { + gvks, _, err := scheme.ObjectKinds(obj) if err != nil { return nil, err } - // example paths: ///* - parts := strings.Split(selfLinkUrl.Path, "/") - if len(parts) < 4 { - return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) - } - if parts[1] == "api" { - version = parts[2] - } else { - version = parts[2] + "/" + parts[3] + if len(gvks) == 0 || gvks[0].Empty() { + return nil, fmt.Errorf("unexpected gvks registered for object %T: %v", obj, gvks) } + // TODO: The same object can be registered for multiple group versions + // (although in practise this doesn't seem to be used). + // In such case, the version set may not be correct. 
+ gvk = gvks[0] } + kind := gvk.Kind + version := gvk.GroupVersion().String() + // only has list metadata if objectMeta == nil { return &v1.ObjectReference{ diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 7cffe2a5..980d36ae 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -20,6 +20,7 @@ import ( "fmt" "net" "net/http" + "strings" "sync" "time" @@ -39,13 +40,15 @@ const idleConnsPerHost = 25 var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)} type tlsCacheKey struct { - insecure bool - caData string - certData string - keyData string - getCert string - serverName string - dial string + insecure bool + caData string + certData string + keyData string + getCert string + serverName string + nextProtos string + dial string + disableCompression bool } func (t tlsCacheKey) String() string { @@ -53,7 +56,7 @@ func (t tlsCacheKey) String() string { if len(t.keyData) > 0 { keyText = "" } - return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial) + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression) } func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { @@ -95,6 +98,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { TLSClientConfig: tlsConfig, MaxIdleConnsPerHost: idleConnsPerHost, DialContext: dial, + DisableCompression: config.DisableCompression, }) return c.transports[key], nil } @@ -106,12 +110,14 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) { return tlsCacheKey{}, err } return tlsCacheKey{ - insecure: c.TLS.Insecure, - caData: string(c.TLS.CAData), - certData: string(c.TLS.CertData), - keyData: string(c.TLS.KeyData), - getCert: fmt.Sprintf("%p", c.TLS.GetCert), - serverName: c.TLS.ServerName, - dial: fmt.Sprintf("%p", c.Dial), + insecure: c.TLS.Insecure, + caData: string(c.TLS.CAData), + certData: string(c.TLS.CertData), + keyData: string(c.TLS.KeyData), + getCert: fmt.Sprintf("%p", c.TLS.GetCert), + serverName: c.TLS.ServerName, + nextProtos: strings.Join(c.TLS.NextProtos, ","), + dial: fmt.Sprintf("%p", c.Dial), + disableCompression: c.DisableCompression, }, nil } diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 5de0a2cb..9e18d11d 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -47,6 +47,10 @@ type Config struct { // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may // not be specified with the TLS client certificate options. Use // WrapTransport for most client level operations. @@ -122,5 +126,11 @@ type TLSConfig struct { CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. + // NextProtos is a list of supported application level protocols, in order of preference. + // Used to populate tls.Config.NextProtos. 
+ // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string + GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 117a9c8c..a272753a 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -80,10 +80,6 @@ func DebugWrappers(rt http.RoundTripper) http.RoundTripper { return rt } -type requestCanceler interface { - CancelRequest(*http.Request) -} - type authProxyRoundTripper struct { username string groups []string @@ -140,11 +136,7 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex } func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -168,11 +160,7 @@ func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -199,11 +187,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -259,11 +243,7 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons } func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegate.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegate) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } @@ -318,11 +298,7 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -402,11 +378,39 @@ func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debug } func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { - canceler.CancelRequest(req) + 
tryCancelRequest(rt.WrappedRoundTripper(), req) +} + +var knownAuthTypes = map[string]bool{ + "bearer": true, + "basic": true, + "negotiate": true, +} + +// maskValue masks credential content from authorization headers +// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +func maskValue(key string, value string) string { + if !strings.EqualFold(key, "Authorization") { + return value + } + if len(value) == 0 { + return "" + } + var authType string + if i := strings.Index(value, " "); i > 0 { + authType = value[0:i] + } else { + authType = value + } + if !knownAuthTypes[strings.ToLower(authType)] { + return "" + } + if len(value) > len(authType)+1 { + value = authType + " " } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + value = authType } + return value } func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -423,6 +427,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e klog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { + value = maskValue(key, value) klog.Infof(" %s: %s", key, value) } } diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index 8595df27..bb32c3b4 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -25,6 +25,7 @@ import ( "time" "golang.org/x/oauth2" + "k8s.io/klog" ) @@ -59,6 +60,15 @@ func NewCachedFileTokenSource(path string) oauth2.TokenSource { } } +// NewCachedTokenSource returns a oauth2.TokenSource reads a token from a +// designed TokenSource. The ts would provide the source of token. +func NewCachedTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &cachingTokenSource{ + now: time.Now, + base: ts, + } +} + type tokenSourceTransport struct { base http.RoundTripper ort http.RoundTripper @@ -72,6 +82,14 @@ func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, e return tst.ort.RoundTrip(req) } +func (tst *tokenSourceTransport) CancelRequest(req *http.Request) { + if req.Header.Get("Authorization") != "" { + tryCancelRequest(tst.base, req) + return + } + tryCancelRequest(tst.ort, req) +} + type fileTokenSource struct { path string period time.Duration diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 2a145c97..cd8de982 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -23,6 +23,9 @@ import ( "fmt" "io/ioutil" "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog" ) // New returns an http.RoundTripper that will provide the authentication @@ -53,7 +56,7 @@ func New(config *Config) (http.RoundTripper, error) { // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. 
func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { + if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0 || len(c.TLS.NextProtos) > 0) { return nil, nil } if c.HasCA() && c.TLS.Insecure { @@ -70,6 +73,7 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { MinVersion: tls.VersionTLS12, InsecureSkipVerify: c.TLS.Insecure, ServerName: c.TLS.ServerName, + NextProtos: c.TLS.NextProtos, } if c.HasCA() { @@ -225,3 +229,17 @@ func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) return b.rt.RoundTrip(req) } } + +func tryCancelRequest(rt http.RoundTripper, req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + switch rt := rt.(type) { + case canceler: + rt.CancelRequest(req) + case utilnet.RoundTripperWrapper: + tryCancelRequest(rt.WrappedRoundTripper(), req) + default: + klog.Warningf("Unable to cancel request for %T", rt) + } +} diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go index 235a9e01..f98faee4 100644 --- a/vendor/k8s.io/client-go/util/connrotation/connrotation.go +++ b/vendor/k8s.io/client-go/util/connrotation/connrotation.go @@ -77,11 +77,6 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net. closable := &closableConn{Conn: conn} - // Start tracking the connection - d.mu.Lock() - d.conns[closable] = struct{}{} - d.mu.Unlock() - // When the connection is closed, remove it from the map. This will // be no-op if the connection isn't in the map, e.g. if CloseAll() // is called. @@ -91,6 +86,11 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net. d.mu.Unlock() } + // Start tracking the connection + d.mu.Lock() + d.conns[closable] = struct{}{} + d.mu.Unlock() + return closable, nil } diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index b7cb70ea..c48ba03e 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -30,7 +30,7 @@ type backoffEntry struct { } type Backoff struct { - sync.Mutex + sync.RWMutex Clock clock.Clock defaultDuration time.Duration maxDuration time.Duration @@ -57,8 +57,8 @@ func NewBackOff(initial, max time.Duration) *Backoff { // Get the current backoff Duration func (p *Backoff) Get(id string) time.Duration { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() var delay time.Duration entry, ok := p.perItemBackoff[id] if ok { @@ -90,8 +90,8 @@ func (p *Backoff) Reset(id string) { // Returns True if the elapsed time since eventTime is smaller than the current backoff window func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false @@ -99,13 +99,13 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } - return p.Clock.Now().Sub(eventTime) < entry.backoff + return p.Clock.Since(eventTime) < entry.backoff } // Returns True if time since lastupdate is less than the current backoff window. 
func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go index e671c044..ffd912c5 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -17,6 +17,8 @@ limitations under the License. package flowcontrol import ( + "context" + "errors" "sync" "time" @@ -33,6 +35,8 @@ type RateLimiter interface { Stop() // QPS returns QPS of this rate limiter QPS() float32 + // Wait returns nil if a token is taken before the Context is done. + Wait(ctx context.Context) error } type tokenBucketRateLimiter struct { @@ -98,6 +102,10 @@ func (t *tokenBucketRateLimiter) QPS() float32 { return t.qps } +func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error { + return t.limiter.Wait(ctx) +} + type fakeAlwaysRateLimiter struct{} func NewFakeAlwaysRateLimiter() RateLimiter { @@ -116,6 +124,10 @@ func (t *fakeAlwaysRateLimiter) QPS() float32 { return 1 } +func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error { + return nil +} + type fakeNeverRateLimiter struct { wg sync.WaitGroup } @@ -141,3 +153,7 @@ func (t *fakeNeverRateLimiter) Accept() { func (t *fakeNeverRateLimiter) QPS() float32 { return 1 } + +func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error { + return errors.New("can not be accept") +} diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go index 816db57f..3fdbeb8c 100644 --- a/vendor/k8s.io/client-go/util/homedir/homedir.go +++ b/vendor/k8s.io/client-go/util/homedir/homedir.go @@ -18,30 +18,75 @@ package homedir import ( "os" + "path/filepath" "runtime" ) -// HomeDir returns the home directory for the current user +// HomeDir returns the home directory for the current user. +// On Windows: +// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned. +// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned. +// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned. +// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned. func HomeDir() string { if runtime.GOOS == "windows" { + home := os.Getenv("HOME") + homeDriveHomePath := "" + if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDriveHomePath = homeDrive + homePath + } + userProfile := os.Getenv("USERPROFILE") - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); len(home) > 0 { - if _, err := os.Stat(home); err == nil { - return home + // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file. + // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. 
+ for _, p := range []string{home, homeDriveHomePath, userProfile} { + if len(p) == 0 { + continue } - } - if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { - homeDir := homeDrive + homePath - if _, err := os.Stat(homeDir); err == nil { - return homeDir + if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { + continue } + return p } - if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 { - if _, err := os.Stat(userProfile); err == nil { - return userProfile + + firstSetPath := "" + firstExistingPath := "" + + // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools + for _, p := range []string{home, userProfile, homeDriveHomePath} { + if len(p) == 0 { + continue + } + if len(firstSetPath) == 0 { + // remember the first path that is set + firstSetPath = p + } + info, err := os.Stat(p) + if err != nil { + continue } + if len(firstExistingPath) == 0 { + // remember the first path that exists + firstExistingPath = p + } + if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { + // return first path that is writeable + return p + } + } + + // If none are writeable, return first location that exists + if len(firstExistingPath) > 0 { + return firstExistingPath } + + // If none exist, return first location that is set + if len(firstSetPath) > 0 { + return firstSetPath + } + + // We've got nothing + return "" } return os.Getenv("HOME") } diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go index 3ac0840a..c80ff087 100644 --- a/vendor/k8s.io/client-go/util/retry/util.go +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -42,12 +42,12 @@ var DefaultBackoff = wait.Backoff{ Jitter: 0.1, } -// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting -// write. Callers should preserve previous executions if they wish to retry changes. It performs an +// OnError executes the provided function repeatedly, retrying if the server returns a specified +// error. Callers should preserve previous executions if they wish to retry changes. It performs an // exponential backoff. // // var pod *api.Pod -// err := RetryOnConflict(DefaultBackoff, func() (err error) { +// err := retry.OnError(DefaultBackoff, errors.IsConflict, func() (err error) { // pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) // return // }) @@ -58,14 +58,14 @@ var DefaultBackoff = wait.Backoff{ // ... // // TODO: Make Backoff an interface? 
-func RetryOnConflict(backoff wait.Backoff, fn func() error) error { +func OnError(backoff wait.Backoff, errorFunc func(error) bool, fn func() error) error { var lastConflictErr error err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := fn() switch { case err == nil: return true, nil - case errors.IsConflict(err): + case errorFunc(err): lastConflictErr = err return false, nil default: @@ -77,3 +77,8 @@ func RetryOnConflict(backoff wait.Backoff, fn func() error) error { } return err } + +// RetryOnConflict executes the function function repeatedly, retrying if the server returns a conflicting +func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + return OnError(backoff, errors.IsConflict, fn) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index bd654bf3..07327988 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -18,6 +18,7 @@ package workqueue import ( "container/heap" + "sync" "time" "k8s.io/apimachinery/pkg/util/clock" @@ -43,13 +44,12 @@ func NewNamedDelayingQueue(name string) DelayingInterface { func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { ret := &delayingType{ - Interface: NewNamed(name), - clock: clock, - heartbeat: clock.NewTicker(maxWait), - stopCh: make(chan struct{}), - waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), - deprecatedMetrics: newDeprecatedRetryMetrics(name), + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), } go ret.waitingLoop() @@ -66,6 +66,8 @@ type delayingType struct { // stopCh lets us signal a shutdown to the waiting loop stopCh chan struct{} + // stopOnce guarantees we only signal shutdown a single time + stopOnce sync.Once // heartbeat ensures we wait no more than maxWait before firing heartbeat clock.Ticker @@ -74,8 +76,7 @@ type delayingType struct { waitingForAddCh chan *waitFor // metrics counts the number of retries - metrics retryMetrics - deprecatedMetrics retryMetrics + metrics retryMetrics } // waitFor holds the data to add and the time it should be added @@ -133,11 +134,14 @@ func (pq waitForPriorityQueue) Peek() interface{} { return pq[0] } -// ShutDown gives a way to shut off this queue +// ShutDown stops the queue. After the queue drains, the returned shutdown bool +// on Get() will be true. This method may be invoked more than once. func (q *delayingType) ShutDown() { - q.Interface.ShutDown() - close(q.stopCh) - q.heartbeat.Stop() + q.stopOnce.Do(func() { + q.Interface.ShutDown() + close(q.stopCh) + q.heartbeat.Stop() + }) } // AddAfter adds the given item to the work queue after the given delay @@ -148,7 +152,6 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { } q.metrics.retry() - q.deprecatedMetrics.retry() // immediately add things with no delay if duration <= 0 { diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go index 2a00c74a..a5c976e0 100644 --- a/vendor/k8s.io/client-go/util/workqueue/doc.go +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -23,4 +23,4 @@ limitations under the License. // * Multiple consumers and producers. In particular, it is allowed for an // item to be reenqueued while it is being processed. // * Shutdown notifications. 
-package workqueue +package workqueue // import "k8s.io/client-go/util/workqueue" diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index be23ddd0..a3911bf2 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -87,14 +87,6 @@ type defaultQueueMetrics struct { // how long have current threads been working? unfinishedWorkSeconds SettableGaugeMetric longestRunningProcessor SettableGaugeMetric - - // TODO(danielqsj): Remove the following metrics, they are deprecated - deprecatedDepth GaugeMetric - deprecatedAdds CounterMetric - deprecatedLatency SummaryMetric - deprecatedWorkDuration SummaryMetric - deprecatedUnfinishedWorkSeconds SettableGaugeMetric - deprecatedLongestRunningProcessor SettableGaugeMetric } func (m *defaultQueueMetrics) add(item t) { @@ -103,9 +95,7 @@ func (m *defaultQueueMetrics) add(item t) { } m.adds.Inc() - m.deprecatedAdds.Inc() m.depth.Inc() - m.deprecatedDepth.Inc() if _, exists := m.addTimes[item]; !exists { m.addTimes[item] = m.clock.Now() } @@ -117,11 +107,9 @@ func (m *defaultQueueMetrics) get(item t) { } m.depth.Dec() - m.deprecatedDepth.Dec() m.processingStartTimes[item] = m.clock.Now() if startTime, exists := m.addTimes[item]; exists { m.latency.Observe(m.sinceInSeconds(startTime)) - m.deprecatedLatency.Observe(m.sinceInMicroseconds(startTime)) delete(m.addTimes, item) } } @@ -133,7 +121,6 @@ func (m *defaultQueueMetrics) done(item t) { if startTime, exists := m.processingStartTimes[item]; exists { m.workDuration.Observe(m.sinceInSeconds(startTime)) - m.deprecatedWorkDuration.Observe(m.sinceInMicroseconds(startTime)) delete(m.processingStartTimes, item) } } @@ -153,9 +140,7 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() { // Convert to seconds; microseconds is unhelpfully granular for this. total /= 1000000 m.unfinishedWorkSeconds.Set(total) - m.deprecatedUnfinishedWorkSeconds.Set(total) m.longestRunningProcessor.Set(oldest / 1000000) - m.deprecatedLongestRunningProcessor.Set(oldest) // in microseconds. 
} type noMetrics struct{} @@ -200,13 +185,6 @@ type MetricsProvider interface { NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric NewRetriesMetric(name string) CounterMetric - NewDeprecatedDepthMetric(name string) GaugeMetric - NewDeprecatedAddsMetric(name string) CounterMetric - NewDeprecatedLatencyMetric(name string) SummaryMetric - NewDeprecatedWorkDurationMetric(name string) SummaryMetric - NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric - NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric - NewDeprecatedRetriesMetric(name string) CounterMetric } type noopMetricsProvider struct{} @@ -239,34 +217,6 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } -func (_ noopMetricsProvider) NewDeprecatedDepthMetric(name string) GaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedAddsMetric(name string) CounterMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedLatencyMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedWorkDurationMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedRetriesMetric(name string) CounterMetric { - return noopMetric{} -} - var globalMetricsFactory = queueMetricsFactory{ metricsProvider: noopMetricsProvider{}, } @@ -289,21 +239,15 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu return noMetrics{} } return &defaultQueueMetrics{ - clock: clock, - depth: mp.NewDepthMetric(name), - adds: mp.NewAddsMetric(name), - latency: mp.NewLatencyMetric(name), - workDuration: mp.NewWorkDurationMetric(name), - unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), - longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), - deprecatedDepth: mp.NewDeprecatedDepthMetric(name), - deprecatedAdds: mp.NewDeprecatedAddsMetric(name), - deprecatedLatency: mp.NewDeprecatedLatencyMetric(name), - deprecatedWorkDuration: mp.NewDeprecatedWorkDurationMetric(name), - deprecatedUnfinishedWorkSeconds: mp.NewDeprecatedUnfinishedWorkSecondsMetric(name), - deprecatedLongestRunningProcessor: mp.NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, } } @@ -317,16 +261,6 @@ func newRetryMetrics(name string) retryMetrics { } } -func newDeprecatedRetryMetrics(name string) retryMetrics { - var ret *defaultRetryMetrics - if len(name) == 0 { - return ret - } - return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewDeprecatedRetriesMetric(name), - } -} - // SetProvider sets the metrics provider for all subsequently created work // queues. 
Only the first call has an effect. func SetProvider(metricsProvider MetricsProvider) { diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go index ad253501..5928a0c5 100644 --- a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -25,14 +25,6 @@ import ( type DoWorkPieceFunc func(piece int) -// Parallelize is a very simple framework that allows for parallelizing -// N independent pieces of work. -// -// Deprecated: Use ParallelizeUntil instead. -func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { - ParallelizeUntil(nil, workers, pieces, doWorkPiece) -} - // ParallelizeUntil is a framework that allows for parallelizing N // independent pieces of work until done or the context is canceled. func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) { diff --git a/vendor/k8s.io/code-generator/CONTRIBUTING.md b/vendor/k8s.io/code-generator/CONTRIBUTING.md new file mode 100644 index 00000000..76625b7b --- /dev/null +++ b/vendor/k8s.io/code-generator/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/code-generator](https://git.k8s.io/kubernetes/staging/src/k8s.io/code-generator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information diff --git a/vendor/k8s.io/code-generator/LICENSE b/vendor/k8s.io/code-generator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/code-generator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS new file mode 100644 index 00000000..6f7abe3e --- /dev/null +++ b/vendor/k8s.io/code-generator/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- wojtek-t +- sttts +reviewers: +- lavalamp +- wojtek-t +- sttts +labels: +- sig/api-machinery +- area/code-generation diff --git a/vendor/k8s.io/code-generator/README.md b/vendor/k8s.io/code-generator/README.md new file mode 100644 index 00000000..e03c6bf5 --- /dev/null +++ b/vendor/k8s.io/code-generator/README.md @@ -0,0 +1,24 @@ +# code-generator + +Golang code-generators used to implement [Kubernetes-style API types](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md). 
+ +## Purpose + +These code-generators can be used +- in the context of [CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to build native, versioned clients, + informers and other helpers +- in the context of [User-provider API Servers](https://github.com/kubernetes/apiserver) to build conversions between internal and versioned types, defaulters, protobuf codecs, + internal and versioned clients and informers. + +## Resources +- The example [sample controller](https://github.com/kubernetes/sample-controller) shows a code example of a controller that uses the clients, listers and informers generated by this library. +- The article [Kubernetes Deep Dive: Code Generation for CustomResources](https://blog.openshift.com/kubernetes-deep-dive-code-generation-customresources/) gives a step by step instruction on how to use this library. + +## Compatibility + +HEAD of this repo will match HEAD of k8s.io/apiserver, k8s.io/apimachinery, and k8s.io/client-go. + +## Where does it come from? + +`code-generator` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/code-generator. +Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here. diff --git a/vendor/k8s.io/code-generator/SECURITY_CONTACTS b/vendor/k8s.io/code-generator/SECURITY_CONTACTS new file mode 100644 index 00000000..6df6a4d6 --- /dev/null +++ b/vendor/k8s.io/code-generator/SECURITY_CONTACTS @@ -0,0 +1,17 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +joelsmith +liggitt +philips +tallclair diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS new file mode 100644 index 00000000..62866d0b --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- wojtek-t +- caesarxuchao +reviewers: +- lavalamp +- wojtek-t +- caesarxuchao diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/README.md b/vendor/k8s.io/code-generator/cmd/client-gen/README.md new file mode 100644 index 00000000..092a6115 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/README.md @@ -0,0 +1,4 @@ +See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/sig-api-machinery/generating-clientset.md) + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/staging/src/k8s.io/code-generator/client-gen/README.md?pixel)]() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go new file mode 100644 index 00000000..f45be1bb --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + + "k8s.io/code-generator/cmd/client-gen/types" + codegenutil "k8s.io/code-generator/pkg/util" +) + +var DefaultInputDirs = []string{} + +// ClientGenArgs is a wrapper for arguments to client-gen. +type CustomArgs struct { + // A sorted list of group versions to generate. For each of them the package path is found + // in GroupVersionToInputPath. + Groups []types.GroupVersions + + // Overrides for which types should be included in the client. + IncludedTypesOverrides map[types.GroupVersion][]string + + // ClientsetName is the name of the clientset to be generated. It's + // populated from command-line arguments. + ClientsetName string + // ClientsetAPIPath is the default API HTTP path for generated clients. + ClientsetAPIPath string + // ClientsetOnly determines if we should generate the clients for groups and + // types along with the clientset. It's populated from command-line + // arguments. + ClientsetOnly bool + // FakeClient determines if client-gen generates the fake clients. + FakeClient bool +} + +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + ClientsetName: "internalclientset", + ClientsetAPIPath: "/apis", + ClientsetOnly: false, + FakeClient: true, + } + genericArgs.CustomArgs = customArgs + genericArgs.InputDirs = DefaultInputDirs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/clientset") + } + + return genericArgs, customArgs +} + +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) { + gvsBuilder := NewGroupVersionsBuilder(&ca.Groups) + pflag.Var(NewGVPackagesValue(gvsBuilder, nil), "input", "group/versions that client-gen will generate clients for. At most one version per group is allowed. Specified in the format \"group1/version1,group2/version2...\".") + pflag.Var(NewGVTypesValue(&ca.IncludedTypesOverrides, []string{}), "included-types-overrides", "list of group/version/type for which client should be generated. By default, client is generated for all types which have genclient in types.go. This overrides that. For each groupVersion in this list, only the types mentioned here will be included. 
The default check of genclient will be used for other group versions.") + pflag.Var(NewInputBasePathValue(gvsBuilder, inputBase), "input-base", "base path to look for the api group.") + pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", ca.ClientsetName, "the name of the generated clientset package.") + pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", ca.ClientsetAPIPath, "the value of default API HTTP path, starting with / and without trailing /.") + pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", ca.ClientsetOnly, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") + pflag.BoolVar(&ca.FakeClient, "fake-clientset", ca.FakeClient, "when set, client-gen will generate the fake clientset that can be used in tests") + + // support old flags + fs.SetNormalizeFunc(mapFlagName("clientset-path", "output-package", fs.GetNormalizeFunc())) +} + +func Validate(genericArgs *args.GeneratorArgs) error { + customArgs := genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + if len(customArgs.ClientsetName) == 0 { + return fmt.Errorf("clientset name cannot be empty") + } + if len(customArgs.ClientsetAPIPath) == 0 { + return fmt.Errorf("clientset API path cannot be empty") + } + + return nil +} + +// GroupVersionPackages returns a map from GroupVersion to the package with the types.go. +func (ca *CustomArgs) GroupVersionPackages() map[types.GroupVersion]string { + res := map[types.GroupVersion]string{} + for _, pkg := range ca.Groups { + for _, v := range pkg.Versions { + res[types.GroupVersion{Group: pkg.Group, Version: v.Version}] = v.Package + } + } + return res +} + +func mapFlagName(from, to string, old func(fs *pflag.FlagSet, name string) pflag.NormalizedName) func(fs *pflag.FlagSet, name string) pflag.NormalizedName { + return func(fs *pflag.FlagSet, name string) pflag.NormalizedName { + if name == from { + name = to + } + return old(fs, name) + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go new file mode 100644 index 00000000..8da71d6f --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go @@ -0,0 +1,183 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package args + +import ( + "bytes" + "encoding/csv" + "flag" + "path" + "sort" + "strings" + + "k8s.io/code-generator/cmd/client-gen/types" +) + +type inputBasePathValue struct { + builder *groupVersionsBuilder +} + +var _ flag.Value = &inputBasePathValue{} + +func NewInputBasePathValue(builder *groupVersionsBuilder, def string) *inputBasePathValue { + v := &inputBasePathValue{ + builder: builder, + } + v.Set(def) + return v +} + +func (s *inputBasePathValue) Set(val string) error { + s.builder.importBasePath = val + return s.builder.update() +} + +func (s *inputBasePathValue) Type() string { + return "string" +} + +func (s *inputBasePathValue) String() string { + return s.builder.importBasePath +} + +type gvPackagesValue struct { + builder *groupVersionsBuilder + groups []string + changed bool +} + +func NewGVPackagesValue(builder *groupVersionsBuilder, def []string) *gvPackagesValue { + gvp := new(gvPackagesValue) + gvp.builder = builder + if def != nil { + if err := gvp.set(def); err != nil { + panic(err) + } + } + return gvp +} + +var _ flag.Value = &gvPackagesValue{} + +func (s *gvPackagesValue) set(vs []string) error { + if s.changed { + s.groups = append(s.groups, vs...) + } else { + s.groups = append([]string(nil), vs...) + } + + s.builder.groups = s.groups + return s.builder.update() +} + +func (s *gvPackagesValue) Set(val string) error { + vs, err := readAsCSV(val) + if err != nil { + return err + } + if err := s.set(vs); err != nil { + return err + } + s.changed = true + return nil +} + +func (s *gvPackagesValue) Type() string { + return "stringSlice" +} + +func (s *gvPackagesValue) String() string { + str, _ := writeAsCSV(s.groups) + return "[" + str + "]" +} + +type groupVersionsBuilder struct { + value *[]types.GroupVersions + groups []string + importBasePath string +} + +func NewGroupVersionsBuilder(groups *[]types.GroupVersions) *groupVersionsBuilder { + return &groupVersionsBuilder{ + value: groups, + } +} + +func (p *groupVersionsBuilder) update() error { + var seenGroups = make(map[types.Group]*types.GroupVersions) + for _, v := range p.groups { + pth, gvString := parsePathGroupVersion(v) + gv, err := types.ToGroupVersion(gvString) + if err != nil { + return err + } + + versionPkg := types.PackageVersion{Package: path.Join(p.importBasePath, pth, gv.Group.NonEmpty(), gv.Version.String()), Version: gv.Version} + if group, ok := seenGroups[gv.Group]; ok { + seenGroups[gv.Group].Versions = append(group.Versions, versionPkg) + } else { + seenGroups[gv.Group] = &types.GroupVersions{ + PackageName: gv.Group.NonEmpty(), + Group: gv.Group, + Versions: []types.PackageVersion{versionPkg}, + } + } + } + + var groupNames []string + for groupName := range seenGroups { + groupNames = append(groupNames, groupName.String()) + } + sort.Strings(groupNames) + *p.value = []types.GroupVersions{} + for _, groupName := range groupNames { + *p.value = append(*p.value, *seenGroups[types.Group(groupName)]) + } + + return nil +} + +func parsePathGroupVersion(pgvString string) (gvPath string, gvString string) { + subs := strings.Split(pgvString, "/") + length := len(subs) + switch length { + case 0, 1, 2: + return "", pgvString + default: + return strings.Join(subs[:length-2], "/"), strings.Join(subs[length-2:], "/") + } +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := 
&bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), "\n"), nil +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go new file mode 100644 index 00000000..e4e3ccb5 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "flag" + "fmt" + "strings" + + "k8s.io/code-generator/cmd/client-gen/types" +) + +type gvTypeValue struct { + gvToTypes *map[types.GroupVersion][]string + changed bool +} + +func NewGVTypesValue(gvToTypes *map[types.GroupVersion][]string, def []string) *gvTypeValue { + gvt := new(gvTypeValue) + gvt.gvToTypes = gvToTypes + if def != nil { + if err := gvt.set(def); err != nil { + panic(err) + } + } + return gvt +} + +var _ flag.Value = &gvTypeValue{} + +func (s *gvTypeValue) set(vs []string) error { + if !s.changed { + *s.gvToTypes = map[types.GroupVersion][]string{} + } + + for _, input := range vs { + gvString, typeStr, err := parseGroupVersionType(input) + if err != nil { + return err + } + gv, err := types.ToGroupVersion(gvString) + if err != nil { + return err + } + types, ok := (*s.gvToTypes)[gv] + if !ok { + types = []string{} + } + types = append(types, typeStr) + (*s.gvToTypes)[gv] = types + } + + return nil +} + +func (s *gvTypeValue) Set(val string) error { + vs, err := readAsCSV(val) + if err != nil { + return err + } + if err := s.set(vs); err != nil { + return err + } + s.changed = true + return nil +} + +func (s *gvTypeValue) Type() string { + return "stringSlice" +} + +func (s *gvTypeValue) String() string { + strs := make([]string, 0, len(*s.gvToTypes)) + for gv, ts := range *s.gvToTypes { + for _, t := range ts { + strs = append(strs, gv.Group.String()+"/"+gv.Version.String()+"/"+t) + } + } + str, _ := writeAsCSV(strs) + return "[" + str + "]" +} + +func parseGroupVersionType(gvtString string) (gvString string, typeStr string, err error) { + invalidFormatErr := fmt.Errorf("invalid value: %s, should be of the form group/version/type", gvtString) + subs := strings.Split(gvtString, "/") + length := len(subs) + switch length { + case 2: + // gvtString of the form group/type, e.g. api/Service,extensions/ReplicaSet + return subs[0] + "/", subs[1], nil + case 3: + return strings.Join(subs[:length-1], "/"), subs[length-1], nil + default: + return "", "", invalidFormatErr + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go new file mode 100644 index 00000000..18980744 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -0,0 +1,403 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generators has the generators for the client-gen utility. +package generators + +import ( + "path/filepath" + "strings" + + clientgenargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/generators/fake" + "k8s.io/code-generator/cmd/client-gen/generators/scheme" + "k8s.io/code-generator/cmd/client-gen/generators/util" + "k8s.io/code-generator/cmd/client-gen/path" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + lowercaseNamer := namer.NewAllLowercasePluralNamer(pluralExceptions) + + publicNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // these exceptions are used to deconflict the generated code + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "EventResource" + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPublicNamer(0), + } + privateNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // these exceptions are used to deconflict the generated code + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "eventResource" + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPrivateNamer(0), + } + publicPluralNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // these exceptions are used to deconflict the generated code + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "EventResource" + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPublicPluralNamer(pluralExceptions), + } + privatePluralNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "eventResource" + // these exceptions are used to deconflict the generated code + "k8s.io/apis/events/v1beta1.Event": "eventResources", + "k8s.io/kubernetes/pkg/apis/events.Event": "eventResources", + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." 
+ t.Name.Name + }, + Delegate: namer.NewPrivatePluralNamer(pluralExceptions), + } + + return namer.NameSystems{ + "singularKind": namer.NewPublicNamer(0), + "public": publicNamer, + "private": privateNamer, + "raw": namer.NewRawNamer("", nil), + "publicPlural": publicPluralNamer, + "privatePlural": privatePluralNamer, + "allLowercasePlural": lowercaseNamer, + "resource": codegennamer.NewTagOverrideNamer("resourceName", lowercaseNamer), + } +} + +// ExceptionNamer allows you specify exceptional cases with exact names. This allows you to have control +// for handling various conflicts, like group and resource names for instance. +type ExceptionNamer struct { + Exceptions map[string]string + KeyFunc func(*types.Type) string + + Delegate namer.Namer +} + +// Name provides the requested name for a type. +func (n *ExceptionNamer) Name(t *types.Type) string { + key := n.KeyFunc(t) + if exception, ok := n.Exceptions[key]; ok { + return exception + } + return n.Delegate.Name(t) +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, apiPath string, srcTreePath string, inputPackage string, boilerplate []byte) generator.Package { + groupVersionClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) + return &generator.DefaultPackage{ + PackageName: strings.ToLower(gv.Version.NonEmpty()), + PackagePath: groupVersionClientPackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package has the automatically generated typed clients. +`), + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + } + // Since we want a file per type that we generate a client for, we + // have to provide a function for this. 
+ for _, t := range typeList { + generators = append(generators, &genClientForType{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(c.Namers["private"].Name(t)), + }, + outputPackage: groupVersionClientPackage, + clientsetPackage: clientsetPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + typeToMatch: t, + imports: generator.NewImportTracker(), + }) + } + + generators = append(generators, &genGroup{ + DefaultGen: generator.DefaultGen{ + OptionalName: groupPackageName + "_client", + }, + outputPackage: groupVersionClientPackage, + inputPackage: inputPackage, + clientsetPackage: clientsetPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + apiPath: apiPath, + types: typeList, + imports: generator.NewImportTracker(), + }) + + expansionFileName := "generated_expansion" + generators = append(generators, &genExpansion{ + groupPackagePath: filepath.Join(srcTreePath, groupVersionClientPackage), + DefaultGen: generator.DefaultGen{ + OptionalName: expansionFileName, + }, + types: typeList, + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient + }, + } +} + +func packageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { + return &generator.DefaultPackage{ + PackageName: customArgs.ClientsetName, + PackagePath: clientsetPackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package has the automatically generated clientset. +`), + // GeneratorFunc returns a list of generators. Each generator generates a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + + &genClientset{ + DefaultGen: generator.DefaultGen{ + OptionalName: "clientset", + }, + groups: customArgs.Groups, + groupGoNames: groupGoNames, + clientsetPackage: clientsetPackage, + outputPackage: customArgs.ClientsetName, + imports: generator.NewImportTracker(), + }, + } + return generators + }, + } +} + +func packageForScheme(customArgs *clientgenargs.CustomArgs, clientsetPackage string, srcTreePath string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { + schemePackage := filepath.Join(clientsetPackage, "scheme") + + // create runtime.Registry for internal client because it has to know about group versions + internalClient := false +NextGroup: + for _, group := range customArgs.Groups { + for _, v := range group.Versions { + if v.String() == "" { + internalClient = true + break NextGroup + } + } + } + + return &generator.DefaultPackage{ + PackageName: "scheme", + PackagePath: schemePackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package contains the scheme of the automatically generated clientset. +`), + // GeneratorFunc returns a list of generators. Each generator generates a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. 
+ generator.DefaultGen{OptionalName: "doc"}, + + &scheme.GenScheme{ + DefaultGen: generator.DefaultGen{ + OptionalName: "register", + }, + InputPackages: customArgs.GroupVersionPackages(), + OutputPackage: schemePackage, + OutputPath: filepath.Join(srcTreePath, schemePackage), + Groups: customArgs.Groups, + GroupGoNames: groupGoNames, + ImportTracker: generator.NewImportTracker(), + CreateRegistry: internalClient, + }, + } + return generators + }, + } +} + +// applyGroupOverrides applies group name overrides to each package, if applicable. If there is a +// comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", use the +// first field (somegroup) as the name of the group in Go code, e.g. as the func name in a clientset. +// +// If the first field of the groupName is not unique within the clientset, use "// +groupName=unique +func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.CustomArgs) { + // Create a map from "old GV" to "new GV" so we know what changes we need to make. + changes := make(map[clientgentypes.GroupVersion]clientgentypes.GroupVersion) + for gv, inputDir := range customArgs.GroupVersionPackages() { + p := universe.Package(inputDir) + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + newGV := clientgentypes.GroupVersion{ + Group: clientgentypes.Group(override[0]), + Version: gv.Version, + } + changes[gv] = newGV + } + } + + // Modify customArgs.Groups based on the groupName overrides. + newGroups := make([]clientgentypes.GroupVersions, 0, len(customArgs.Groups)) + for _, gvs := range customArgs.Groups { + gv := clientgentypes.GroupVersion{ + Group: gvs.Group, + Version: gvs.Versions[0].Version, // we only need a version, and the first will do + } + if newGV, ok := changes[gv]; ok { + // There's an override, so use it. + newGVS := clientgentypes.GroupVersions{ + PackageName: gvs.PackageName, + Group: newGV.Group, + Versions: gvs.Versions, + } + newGroups = append(newGroups, newGVS) + } else { + // No override. + newGroups = append(newGroups, gvs) + } + } + customArgs.Groups = newGroups +} + +// Packages makes the client package definition. +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs) + if !ok { + klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") + } + includedTypesOverrides := customArgs.IncludedTypesOverrides + + applyGroupOverrides(context.Universe, customArgs) + + gvToTypes := map[clientgentypes.GroupVersion][]*types.Type{} + groupGoNames := make(map[clientgentypes.GroupVersion]string) + for gv, inputDir := range customArgs.GroupVersionPackages() { + p := context.Universe.Package(path.Vendorless(inputDir)) + + // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as + // the Go group identifier in CamelCase. It defaults + groupGoNames[gv] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) + if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + groupGoNames[gv] = namer.IC(override[0]) + } + + // Package are indexed with the vendor prefix stripped + for n, t := range p.Types { + // filter out types which are not included in user specified overrides. 
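+			// Illustrative note: overrides typically come from client-gen's
+			// --included-types-overrides flag; when no override is given for this
+			// group/version, the +genclient tag check below decides which types get a client.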
+ typesOverride, ok := includedTypesOverrides[gv] + if ok { + found := false + for _, typeStr := range typesOverride { + if typeStr == n { + found = true + break + } + } + if !found { + continue + } + } else { + // User has not specified any override for this group version. + // filter out types which dont have genclient. + if tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)); !tags.GenerateClient { + continue + } + } + if _, found := gvToTypes[gv]; !found { + gvToTypes[gv] = []*types.Type{} + } + gvToTypes[gv] = append(gvToTypes[gv], t) + } + } + + var packageList []generator.Package + clientsetPackage := filepath.Join(arguments.OutputPackagePath, customArgs.ClientsetName) + + packageList = append(packageList, packageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate)) + packageList = append(packageList, packageForScheme(customArgs, clientsetPackage, arguments.OutputBase, groupGoNames, boilerplate)) + if customArgs.FakeClient { + packageList = append(packageList, fake.PackageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate)) + } + + // If --clientset-only=true, we don't regenerate the individual typed clients. + if customArgs.ClientsetOnly { + return generator.Packages(packageList) + } + + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + gvPackages := customArgs.GroupVersionPackages() + for _, group := range customArgs.Groups { + for _, version := range group.Versions { + gv := clientgentypes.GroupVersion{Group: group.Group, Version: version.Version} + types := gvToTypes[gv] + inputPath := gvPackages[gv] + packageList = append(packageList, packageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], customArgs.ClientsetAPIPath, arguments.OutputBase, inputPath, boilerplate)) + if customArgs.FakeClient { + packageList = append(packageList, fake.PackageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], inputPath, boilerplate)) + } + } + } + + return generator.Packages(packageList) +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go new file mode 100644 index 00000000..4b3854be --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -0,0 +1,130 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + + clientgenargs "k8s.io/code-generator/cmd/client-gen/args" + scheme "k8s.io/code-generator/cmd/client-gen/generators/scheme" + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" +) + +func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, inputPackage string, boilerplate []byte) generator.Package { + outputPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()), "fake") + // TODO: should make this a function, called by here and in client-generator.go + realClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) + return &generator.DefaultPackage{ + PackageName: "fake", + PackagePath: outputPackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// Package fake has the automatically generated clients. +`), + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + } + // Since we want a file per type that we generate a client for, we + // have to provide a function for this. + for _, t := range typeList { + generators = append(generators, &genFakeForType{ + DefaultGen: generator.DefaultGen{ + OptionalName: "fake_" + strings.ToLower(c.Namers["private"].Name(t)), + }, + outputPackage: outputPackage, + inputPackage: inputPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + typeToMatch: t, + imports: generator.NewImportTracker(), + }) + } + + generators = append(generators, &genFakeForGroup{ + DefaultGen: generator.DefaultGen{ + OptionalName: "fake_" + groupPackageName + "_client", + }, + outputPackage: outputPackage, + realClientPackage: realClientPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + types: typeList, + imports: generator.NewImportTracker(), + }) + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient + }, + } +} + +func PackageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { + return &generator.DefaultPackage{ + // TODO: we'll generate fake clientset for different release in the future. + // Package name and path are hard coded for now. + PackageName: "fake", + PackagePath: filepath.Join(clientsetPackage, "fake"), + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package has the automatically generated fake clientset. +`), + // GeneratorFunc returns a list of generators. Each generator generates a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. 
+ generator.DefaultGen{OptionalName: "doc"}, + + &genClientset{ + DefaultGen: generator.DefaultGen{ + OptionalName: "clientset_generated", + }, + groups: customArgs.Groups, + groupGoNames: groupGoNames, + fakeClientsetPackage: clientsetPackage, + outputPackage: "fake", + imports: generator.NewImportTracker(), + realClientsetPackage: clientsetPackage, + }, + &scheme.GenScheme{ + DefaultGen: generator.DefaultGen{ + OptionalName: "register", + }, + InputPackages: customArgs.GroupVersionPackages(), + OutputPackage: clientsetPackage, + Groups: customArgs.Groups, + GroupGoNames: groupGoNames, + ImportTracker: generator.NewImportTracker(), + PrivateScheme: true, + }, + } + return generators + }, + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go new file mode 100644 index 00000000..55709aab --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -0,0 +1,174 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genClientset generates a package for a clientset. +type genClientset struct { + generator.DefaultGen + groups []clientgentypes.GroupVersions + groupGoNames map[clientgentypes.GroupVersion]string + fakeClientsetPackage string + outputPackage string + imports namer.ImportTracker + clientsetGenerated bool + // the import path of the generated real clientset. + realClientsetPackage string +} + +var _ generator.Generator = &genClientset{} + +func (g *genClientset) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +// We only want to call GenerateType() once. +func (g *genClientset) Filter(c *generator.Context, t *types.Type) bool { + ret := !g.clientsetGenerated + g.clientsetGenerated = true + return ret +} + +func (g *genClientset) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
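+	// Illustrative example of the aliases built below: for a group whose Go name is
+	// "Apps" at version v1, this adds imports aliased "appsv1" (the real typed client)
+	// and "fakeappsv1" (its fake counterpart).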
+ for _, group := range g.groups { + for _, version := range group.Versions { + groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + fakeGroupClientPackage := filepath.Join(groupClientPackage, "fake") + + groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), groupClientPackage)) + imports = append(imports, fmt.Sprintf("fake%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), fakeGroupClientPackage)) + } + } + // the package that has the clientset Interface + imports = append(imports, fmt.Sprintf("clientset \"%s\"", g.realClientsetPackage)) + // imports for the code in commonTemplate + imports = append(imports, + "k8s.io/client-go/testing", + "k8s.io/client-go/discovery", + "fakediscovery \"k8s.io/client-go/discovery/fake\"", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/watch", + ) + + return +} + +func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + // TODO: We actually don't need any type information to generate the clientset, + // perhaps we can adapt the go2ild framework to this kind of usage. + sw := generator.NewSnippetWriter(w, c, "$", "$") + + allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) + + sw.Do(common, nil) + sw.Do(checkImpl, nil) + + for _, group := range allGroups { + m := map[string]interface{}{ + "group": group.Group, + "version": group.Version, + "PackageAlias": group.PackageAlias, + "GroupGoName": group.GroupGoName, + "Version": namer.IC(group.Version.String()), + } + + sw.Do(clientsetInterfaceImplTemplate, m) + } + + return sw.Error() +} + +// This part of code is version-independent, unchanging. +var common = ` +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
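+//
+// Illustrative usage in a unit test (example only):
+//
+//	cs := NewSimpleClientset(existingObjects...)
+//	cs.PrependReactor("create", "examples", func(action testing.Action) (handled bool, ret runtime.Object, err error) {
+//		return true, nil, fmt.Errorf("injected failure")
+//	})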
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} +` + +var checkImpl = ` +var _ clientset.Interface = &Clientset{} +` + +var clientsetInterfaceImplTemplate = ` +// $.GroupGoName$$.Version$ retrieves the $.GroupGoName$$.Version$Client +func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { + return &fake$.PackageAlias$.Fake$.GroupGoName$$.Version${Fake: &c.Fake} +} +` + +var clientsetInterfaceDefaultVersionImpl = ` +// $.GroupGoName$ retrieves the $.GroupGoName$$.Version$Client +func (c *Clientset) $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { + return &fake$.PackageAlias$.Fake$.GroupGoName$$.Version${Fake: &c.Fake} +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go new file mode 100644 index 00000000..8f4d5785 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go @@ -0,0 +1,130 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// genFakeForGroup produces a file for a group client, e.g. ExtensionsClient for the extension group. +type genFakeForGroup struct { + generator.DefaultGen + outputPackage string + realClientPackage string + group string + version string + groupGoName string + // types in this group + types []*types.Type + imports namer.ImportTracker + // If the genGroup has been called. This generator should only execute once. + called bool +} + +var _ generator.Generator = &genFakeForGroup{} + +// We only want to call GenerateType() once per group. 
+func (g *genFakeForGroup) Filter(c *generator.Context, t *types.Type) bool { + if !g.called { + g.called = true + return true + } + return false +} + +func (g *genFakeForGroup) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genFakeForGroup) Imports(c *generator.Context) (imports []string) { + imports = g.imports.ImportLines() + if len(g.types) != 0 { + imports = append(imports, fmt.Sprintf("%s \"%s\"", strings.ToLower(filepath.Base(g.realClientPackage)), g.realClientPackage)) + } + return imports +} + +func (g *genFakeForGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + m := map[string]interface{}{ + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "Fake": c.Universe.Type(types.Name{Package: "k8s.io/client-go/testing", Name: "Fake"}), + "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "RESTClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClient"}), + } + + sw.Do(groupClientTemplate, m) + for _, t := range g.types { + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + wrapper := map[string]interface{}{ + "type": t, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "realClientPackage": strings.ToLower(filepath.Base(g.realClientPackage)), + } + if tags.NonNamespaced { + sw.Do(getterImplNonNamespaced, wrapper) + continue + } + sw.Do(getterImplNamespaced, wrapper) + } + sw.Do(getRESTClient, m) + return sw.Error() +} + +var groupClientTemplate = ` +type Fake$.GroupGoName$$.Version$ struct { + *$.Fake|raw$ +} +` + +var getterImplNamespaced = ` +func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$(namespace string) $.realClientPackage$.$.type|public$Interface { + return &Fake$.type|publicPlural${c, namespace} +} +` + +var getterImplNonNamespaced = ` +func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$() $.realClientPackage$.$.type|public$Interface { + return &Fake$.type|publicPlural${c} +} +` + +var getRESTClient = ` +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *Fake$.GroupGoName$$.Version$) RESTClient() $.RESTClientInterface|raw$ { + var ret *$.RESTClient|raw$ + return ret +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go new file mode 100644 index 00000000..f5888aef --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -0,0 +1,479 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + "k8s.io/code-generator/cmd/client-gen/path" +) + +// genFakeForType produces a file for each top-level type. +type genFakeForType struct { + generator.DefaultGen + outputPackage string + group string + version string + groupGoName string + inputPackage string + typeToMatch *types.Type + imports namer.ImportTracker +} + +var _ generator.Generator = &genFakeForType{} + +// Filter ignores all but one type because we're making a single file per type. +func (g *genFakeForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } + +func (g *genFakeForType) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genFakeForType) Imports(c *generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +// Ideally, we'd like genStatus to return true if there is a subresource path +// registered for "status" in the API server, but we do not have that +// information, so genStatus returns true if the type has a status field. +func genStatus(t *types.Type) bool { + // Default to true if we have a Status member + hasStatus := false + for _, m := range t.Members { + if m.Name == "Status" { + hasStatus = true + break + } + } + + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return hasStatus && !tags.NoStatus +} + +// hasObjectMeta returns true if the type has a ObjectMeta field. +func hasObjectMeta(t *types.Type) bool { + for _, m := range t.Members { + if m.Embedded == true && m.Name == "ObjectMeta" { + return true + } + } + return false +} + +// GenerateType makes the body of a file implementing the individual typed client for type t. +func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + pkg := filepath.Base(t.Name.Package) + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + canonicalGroup := g.group + if canonicalGroup == "core" { + canonicalGroup = "" + } + + groupName := g.group + if g.group == "core" { + groupName = "" + } + + // allow user to define a group name that's different from the one parsed from the directory. 
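+	// e.g. (illustrative) a package-level "// +groupName=mygroup.example.com" comment in
+	// the API package's doc.go is picked up here and used as groupName instead.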
+ p := c.Universe.Package(path.Vendorless(g.inputPackage)) + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + groupName = override[0] + } + + const pkgClientGoTesting = "k8s.io/client-go/testing" + m := map[string]interface{}{ + "type": t, + "inputType": t, + "resultType": t, + "subresourcePath": "", + "package": pkg, + "Package": namer.IC(pkg), + "namespaced": !tags.NonNamespaced, + "Group": namer.IC(g.group), + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "group": canonicalGroup, + "groupName": groupName, + "version": g.version, + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), + "GroupVersionResource": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"}), + "GroupVersionKind": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionKind"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + + "NewRootListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootListAction"}), + "NewListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewListAction"}), + "NewRootGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetAction"}), + "NewGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetAction"}), + "NewRootDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteAction"}), + "NewDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteAction"}), + "NewRootDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteCollectionAction"}), + "NewDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteCollectionAction"}), + "NewRootUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateAction"}), + "NewUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateAction"}), + "NewRootCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateAction"}), + "NewCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateAction"}), + "NewRootWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootWatchAction"}), + "NewWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewWatchAction"}), + "NewCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateSubresourceAction"}), + "NewRootCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateSubresourceAction"}), + "NewUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateSubresourceAction"}), + "NewGetSubresourceAction": 
c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetSubresourceAction"}), + "NewRootGetSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetSubresourceAction"}), + "NewRootUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateSubresourceAction"}), + "NewRootPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchAction"}), + "NewPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchAction"}), + "NewRootPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchSubresourceAction"}), + "NewPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchSubresourceAction"}), + "ExtractFromListOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "ExtractFromListOptions"}), + } + + if tags.NonNamespaced { + sw.Do(structNonNamespaced, m) + } else { + sw.Do(structNamespaced, m) + } + + if tags.NoVerbs { + return sw.Error() + } + sw.Do(resource, m) + sw.Do(kind, m) + + if tags.HasVerb("get") { + sw.Do(getTemplate, m) + } + if tags.HasVerb("list") { + if hasObjectMeta(t) { + sw.Do(listUsingOptionsTemplate, m) + } else { + sw.Do(listTemplate, m) + } + } + if tags.HasVerb("watch") { + sw.Do(watchTemplate, m) + } + + if tags.HasVerb("create") { + sw.Do(createTemplate, m) + } + if tags.HasVerb("update") { + sw.Do(updateTemplate, m) + } + if tags.HasVerb("updateStatus") && genStatus(t) { + sw.Do(updateStatusTemplate, m) + } + if tags.HasVerb("delete") { + sw.Do(deleteTemplate, m) + } + if tags.HasVerb("deleteCollection") { + sw.Do(deleteCollectionTemplate, m) + } + if tags.HasVerb("patch") { + sw.Do(patchTemplate, m) + } + + // generate extended client methods + for _, e := range tags.Extensions { + inputType := *t + resultType := *t + if len(e.InputTypeOverride) > 0 { + if name, pkg := e.Input(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + inputType = *newType + } else { + inputType.Name.Name = e.InputTypeOverride + } + } + if len(e.ResultTypeOverride) > 0 { + if name, pkg := e.Result(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + resultType = *newType + } else { + resultType.Name.Name = e.ResultTypeOverride + } + } + m["inputType"] = &inputType + m["resultType"] = &resultType + m["subresourcePath"] = e.SubResourcePath + + if e.HasVerb("get") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getTemplate), m) + } + } + + if e.HasVerb("list") { + + sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m) + } + + // TODO: Figure out schemantic for watching a sub-resource. + if e.HasVerb("watch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchTemplate), m) + } + + if e.HasVerb("create") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createTemplate), m) + } + } + + if e.HasVerb("update") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateTemplate), m) + } + } + + // TODO: Figure out schemantic for deleting a sub-resource (what arguments + // are passed, does it need two names? etc. 
+ if e.HasVerb("delete") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, deleteTemplate), m) + } + + if e.HasVerb("patch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m) + } + } + + return sw.Error() +} + +// adjustTemplate adjust the origin verb template using the expansion name. +// TODO: Make the verbs in templates parametrized so the strings.Replace() is +// not needed. +func adjustTemplate(name, verbType, template string) string { + return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1) +} + +// template for the struct that implements the type's interface +var structNamespaced = ` +// Fake$.type|publicPlural$ implements $.type|public$Interface +type Fake$.type|publicPlural$ struct { + Fake *Fake$.GroupGoName$$.Version$ + ns string +} +` + +// template for the struct that implements the type's interface +var structNonNamespaced = ` +// Fake$.type|publicPlural$ implements $.type|public$Interface +type Fake$.type|publicPlural$ struct { + Fake *Fake$.GroupGoName$$.Version$ +} +` + +var resource = ` +var $.type|allLowercasePlural$Resource = $.GroupVersionResource|raw${Group: "$.groupName$", Version: "$.version$", Resource: "$.type|resource$"} +` + +var kind = ` +var $.type|allLowercasePlural$Kind = $.GroupVersionKind|raw${Group: "$.groupName$", Version: "$.version$", Kind: "$.type|singularKind$"} +` + +var listTemplate = ` +// List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. +func (c *Fake$.type|publicPlural$) List(opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{}) + $else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.type|raw$List), err +} +` + +var listUsingOptionsTemplate = ` +// List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. +func (c *Fake$.type|publicPlural$) List(opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{}) + $else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$ + if obj == nil { + return nil, err + } + + label, _, _ := $.ExtractFromListOptions|raw$(opts) + if label == nil { + label = $.Everything|raw$() + } + list := &$.type|raw$List{ListMeta: obj.(*$.type|raw$List).ListMeta} + for _, item := range obj.(*$.type|raw$List).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} +` + +var getTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. +func (c *Fake$.type|publicPlural$) Get(name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. 
+ $if .namespaced$Invokes($.NewGetAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.resultType|raw${}) + $else$Invokes($.NewRootGetAction|raw$($.type|allLowercasePlural$Resource, name), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var getSubresourceTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. +func (c *Fake$.type|publicPlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${}) + $else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var deleteTemplate = ` +// Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. +func (c *Fake$.type|publicPlural$) Delete(name string, options *$.DeleteOptions|raw$) error { + _, err := c.Fake. + $if .namespaced$Invokes($.NewDeleteAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.type|raw${}) + $else$Invokes($.NewRootDeleteAction|raw$($.type|allLowercasePlural$Resource, name), &$.type|raw${})$end$ + return err +} +` + +var deleteCollectionTemplate = ` +// DeleteCollection deletes a collection of objects. +func (c *Fake$.type|publicPlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error { + $if .namespaced$action := $.NewDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, c.ns, listOptions) + $else$action := $.NewRootDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, listOptions) + $end$ + _, err := c.Fake.Invokes(action, &$.type|raw$List{}) + return err +} +` +var createTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Create($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewCreateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) + $else$Invokes($.NewRootCreateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var createSubresourceTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. 
+ $if .namespaced$Invokes($.NewCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$), &$.resultType|raw${}) + $else$Invokes($.NewRootCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var updateTemplate = ` +// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Update($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewUpdateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) + $else$Invokes($.NewRootUpdateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var updateSubresourceTemplate = ` +// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", c.ns, $.inputType|private$), &$.inputType|raw${}) + $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var updateStatusTemplate = ` +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *Fake$.type|publicPlural$) UpdateStatus($.type|private$ *$.type|raw$) (*$.type|raw$, error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", c.ns, $.type|private$), &$.type|raw${}) + $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", $.type|private$), &$.type|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.type|raw$), err +} +` + +var watchTemplate = ` +// Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. +func (c *Fake$.type|publicPlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { + return c.Fake. + $if .namespaced$InvokesWatch($.NewWatchAction|raw$($.type|allLowercasePlural$Resource, c.ns, opts)) + $else$InvokesWatch($.NewRootWatchAction|raw$($.type|allLowercasePlural$Resource, opts))$end$ +} +` + +var patchTemplate = ` +// Patch applies the patch and returns the patched $.resultType|private$. +func (c *Fake$.type|publicPlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, subresources... 
), &$.resultType|raw${}) + $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, pt, data, subresources...), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go new file mode 100644 index 00000000..f7254343 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -0,0 +1,183 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genClientset generates a package for a clientset. +type genClientset struct { + generator.DefaultGen + groups []clientgentypes.GroupVersions + groupGoNames map[clientgentypes.GroupVersion]string + clientsetPackage string + outputPackage string + imports namer.ImportTracker + clientsetGenerated bool +} + +var _ generator.Generator = &genClientset{} + +func (g *genClientset) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +// We only want to call GenerateType() once. +func (g *genClientset) Filter(c *generator.Context, t *types.Type) bool { + ret := !g.clientsetGenerated + g.clientsetGenerated = true + return ret +} + +func (g *genClientset) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + for _, group := range g.groups { + for _, version := range group.Versions { + typedClientPath := filepath.Join(g.clientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), typedClientPath)) + } + } + return +} + +func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + // TODO: We actually don't need any type information to generate the clientset, + // perhaps we can adapt the go2ild framework to this kind of usage. 
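+	// Note (illustrative): the snippet writer is configured with "$" as both the left and
+	// right delimiter, so the templates below reference arguments as $.Name$ and apply
+	// namers with a pipe, e.g. $.Config|raw$.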
+ sw := generator.NewSnippetWriter(w, c, "$", "$") + + allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) + m := map[string]interface{}{ + "allGroups": allGroups, + "Config": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), + "DefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), + "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "DiscoveryInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryInterface"}), + "DiscoveryClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryClient"}), + "NewDiscoveryClientForConfig": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfig"}), + "NewDiscoveryClientForConfigOrDie": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfigOrDie"}), + "NewDiscoveryClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClient"}), + "flowcontrolNewTokenBucketRateLimiter": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/flowcontrol", Name: "NewTokenBucketRateLimiter"}), + } + sw.Do(clientsetInterface, m) + sw.Do(clientsetTemplate, m) + for _, g := range allGroups { + sw.Do(clientsetInterfaceImplTemplate, g) + } + sw.Do(getDiscoveryTemplate, m) + sw.Do(newClientsetForConfigTemplate, m) + sw.Do(newClientsetForConfigOrDieTemplate, m) + sw.Do(newClientsetForRESTClientTemplate, m) + + return sw.Error() +} + +var clientsetInterface = ` +type Interface interface { + Discovery() $.DiscoveryInterface|raw$ + $range .allGroups$$.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface + $end$ +} +` + +var clientsetTemplate = ` +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *$.DiscoveryClient|raw$ + $range .allGroups$$.LowerCaseGroupGoName$$.Version$ *$.PackageAlias$.$.GroupGoName$$.Version$Client + $end$ +} +` + +var clientsetInterfaceImplTemplate = ` +// $.GroupGoName$$.Version$ retrieves the $.GroupGoName$$.Version$Client +func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { + return c.$.LowerCaseGroupGoName$$.Version$ +} +` + +var getDiscoveryTemplate = ` +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() $.DiscoveryInterface|raw$ { + if c == nil { + return nil + } + return c.DiscoveryClient +} +` + +var newClientsetForConfigTemplate = ` +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *$.Config|raw$) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = $.flowcontrolNewTokenBucketRateLimiter|raw$(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error +$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$, err =$.PackageAlias$.NewForConfig(&configShallowCopy) + if err!=nil { + return nil, err + } +$end$ + cs.DiscoveryClient, err = $.NewDiscoveryClientForConfig|raw$(&configShallowCopy) + if err!=nil { + return nil, err + } + return &cs, nil +} +` + +var newClientsetForConfigOrDieTemplate = ` +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *$.Config|raw$) *Clientset { + var cs Clientset +$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$ =$.PackageAlias$.NewForConfigOrDie(c) +$end$ + cs.DiscoveryClient = $.NewDiscoveryClientForConfigOrDie|raw$(c) + return &cs +} +` + +var newClientsetForRESTClientTemplate = ` +// New creates a new Clientset for the given RESTClient. +func New(c $.RESTClientInterface|raw$) *Clientset { + var cs Clientset +$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$ =$.PackageAlias$.New(c) +$end$ + cs.DiscoveryClient = $.NewDiscoveryClient|raw$(c) + return &cs +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go new file mode 100644 index 00000000..f47c079e --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" +) + +// genExpansion produces a file for a group client, e.g. ExtensionsClient for the extension group. +type genExpansion struct { + generator.DefaultGen + groupPackagePath string + // types in a group + types []*types.Type +} + +// We only want to call GenerateType() once per group. 
+func (g *genExpansion) Filter(c *generator.Context, t *types.Type) bool { + return len(g.types) == 0 || t == g.types[0] +} + +func (g *genExpansion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + for _, t := range g.types { + if _, err := os.Stat(filepath.Join(g.groupPackagePath, strings.ToLower(t.Name.Name+"_expansion.go"))); os.IsNotExist(err) { + sw.Do(expansionInterfaceTemplate, t) + } + } + return sw.Error() +} + +var expansionInterfaceTemplate = ` +type $.|public$Expansion interface {} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go new file mode 100644 index 00000000..215a0171 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -0,0 +1,246 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path/filepath" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + "k8s.io/code-generator/cmd/client-gen/path" +) + +// genGroup produces a file for a group client, e.g. ExtensionsClient for the extension group. +type genGroup struct { + generator.DefaultGen + outputPackage string + group string + version string + groupGoName string + apiPath string + // types in this group + types []*types.Type + imports namer.ImportTracker + inputPackage string + clientsetPackage string + // If the genGroup has been called. This generator should only execute once. + called bool +} + +var _ generator.Generator = &genGroup{} + +// We only want to call GenerateType() once per group. +func (g *genGroup) Filter(c *generator.Context, t *types.Type) bool { + if !g.called { + g.called = true + return true + } + return false +} + +func (g *genGroup) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genGroup) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + imports = append(imports, filepath.Join(g.clientsetPackage, "scheme")) + return +} + +func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + apiPath := func(group string) string { + if group == "core" { + return `"/api"` + } + return `"` + g.apiPath + `"` + } + + groupName := g.group + if g.group == "core" { + groupName = "" + } + // allow user to define a group name that's different from the one parsed from the directory. 
+ p := c.Universe.Package(path.Vendorless(g.inputPackage)) + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + groupName = override[0] + } + + m := map[string]interface{}{ + "group": g.group, + "version": g.version, + "groupName": groupName, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "types": g.types, + "apiPath": apiPath(g.group), + "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), + "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), + "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), + "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), + "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}), + "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: path.Vendorless(g.inputPackage), Name: "SchemeGroupVersion"}), + } + sw.Do(groupInterfaceTemplate, m) + sw.Do(groupClientTemplate, m) + for _, t := range g.types { + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + wrapper := map[string]interface{}{ + "type": t, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + } + if tags.NonNamespaced { + sw.Do(getterImplNonNamespaced, wrapper) + } else { + sw.Do(getterImplNamespaced, wrapper) + } + } + sw.Do(newClientForConfigTemplate, m) + sw.Do(newClientForConfigOrDieTemplate, m) + sw.Do(newClientForRESTClientTemplate, m) + if g.version == "" { + sw.Do(setInternalVersionClientDefaultsTemplate, m) + } else { + sw.Do(setClientDefaultsTemplate, m) + } + sw.Do(getRESTClient, m) + + return sw.Error() +} + +var groupInterfaceTemplate = ` +type $.GroupGoName$$.Version$Interface interface { + RESTClient() $.restRESTClientInterface|raw$ + $range .types$ $.|publicPlural$Getter + $end$ +} +` + +var groupClientTemplate = ` +// $.GroupGoName$$.Version$Client is used to interact with features provided by the $.groupName$ group. +type $.GroupGoName$$.Version$Client struct { + restClient $.restRESTClientInterface|raw$ +} +` + +var getterImplNamespaced = ` +func (c *$.GroupGoName$$.Version$Client) $.type|publicPlural$(namespace string) $.type|public$Interface { + return new$.type|publicPlural$(c, namespace) +} +` + +var getterImplNonNamespaced = ` +func (c *$.GroupGoName$$.Version$Client) $.type|publicPlural$() $.type|public$Interface { + return new$.type|publicPlural$(c) +} +` + +var newClientForConfigTemplate = ` +// NewForConfig creates a new $.GroupGoName$$.Version$Client for the given config. +func NewForConfig(c *$.restConfig|raw$) (*$.GroupGoName$$.Version$Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := $.restRESTClientFor|raw$(&config) + if err != nil { + return nil, err + } + return &$.GroupGoName$$.Version$Client{client}, nil +} +` + +var newClientForConfigOrDieTemplate = ` +// NewForConfigOrDie creates a new $.GroupGoName$$.Version$Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *$.restConfig|raw$) *$.GroupGoName$$.Version$Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} +` + +var getRESTClient = ` +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *$.GroupGoName$$.Version$Client) RESTClient() $.restRESTClientInterface|raw$ { + if c == nil { + return nil + } + return c.restClient +} +` + +var newClientForRESTClientTemplate = ` +// New creates a new $.GroupGoName$$.Version$Client for the given RESTClient. +func New(c $.restRESTClientInterface|raw$) *$.GroupGoName$$.Version$Client { + return &$.GroupGoName$$.Version$Client{c} +} +` + +var setInternalVersionClientDefaultsTemplate = ` +func setConfigDefaults(config *$.restConfig|raw$) error { + config.APIPath = $.apiPath$ + if config.UserAgent == "" { + config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() + } + if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0].Group { + gv := scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0] + config.GroupVersion = &gv + } + config.NegotiatedSerializer = scheme.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + + return nil +} +` + +var setClientDefaultsTemplate = ` +func setConfigDefaults(config *$.restConfig|raw$) error { + gv := $.SchemeGroupVersion|raw$ + config.GroupVersion = &gv + config.APIPath = $.apiPath$ + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() + } + + return nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go new file mode 100644 index 00000000..3e8fc7c4 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go @@ -0,0 +1,599 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// genClientForType produces a file for each top-level type. +type genClientForType struct { + generator.DefaultGen + outputPackage string + clientsetPackage string + group string + version string + groupGoName string + typeToMatch *types.Type + imports namer.ImportTracker +} + +var _ generator.Generator = &genClientForType{} + +// Filter ignores all but one type because we're making a single file per type. 
+func (g *genClientForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } + +func (g *genClientForType) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genClientForType) Imports(c *generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +// Ideally, we'd like genStatus to return true if there is a subresource path +// registered for "status" in the API server, but we do not have that +// information, so genStatus returns true if the type has a status field. +func genStatus(t *types.Type) bool { + // Default to true if we have a Status member + hasStatus := false + for _, m := range t.Members { + if m.Name == "Status" { + hasStatus = true + break + } + } + return hasStatus && !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).NoStatus +} + +// GenerateType makes the body of a file implementing the individual typed client for type t. +func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + pkg := filepath.Base(t.Name.Package) + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + type extendedInterfaceMethod struct { + template string + args map[string]interface{} + } + extendedMethods := []extendedInterfaceMethod{} + for _, e := range tags.Extensions { + inputType := *t + resultType := *t + // TODO: Extract this to some helper method as this code is copied into + // 2 other places. + if len(e.InputTypeOverride) > 0 { + if name, pkg := e.Input(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + inputType = *newType + } else { + inputType.Name.Name = e.InputTypeOverride + } + } + if len(e.ResultTypeOverride) > 0 { + if name, pkg := e.Result(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + resultType = *newType + } else { + resultType.Name.Name = e.ResultTypeOverride + } + } + var updatedVerbtemplate string + if _, exists := subresourceDefaultVerbTemplates[e.VerbType]; e.IsSubresource() && exists { + updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(subresourceDefaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(") + } else { + updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(defaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(") + } + extendedMethods = append(extendedMethods, extendedInterfaceMethod{ + template: updatedVerbtemplate, + args: map[string]interface{}{ + "type": t, + "inputType": &inputType, + "resultType": &resultType, + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + }, + }) + } + m := map[string]interface{}{ + "type": t, + "inputType": t, + "resultType": t, + "package": pkg, + "Package": namer.IC(pkg), + "namespaced": !tags.NonNamespaced, + "Group": namer.IC(g.group), + "subresource": false, + "subresourcePath": "", + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "DeleteOptions": 
c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "schemeParameterCodec": c.Universe.Variable(types.Name{Package: filepath.Join(g.clientsetPackage, "scheme"), Name: "ParameterCodec"}), + } + + sw.Do(getterComment, m) + if tags.NonNamespaced { + sw.Do(getterNonNamespaced, m) + } else { + sw.Do(getterNamespaced, m) + } + + sw.Do(interfaceTemplate1, m) + if !tags.NoVerbs { + if !genStatus(t) { + tags.SkipVerbs = append(tags.SkipVerbs, "updateStatus") + } + interfaceSuffix := "" + if len(extendedMethods) > 0 { + interfaceSuffix = "\n" + } + sw.Do("\n"+generateInterface(tags)+interfaceSuffix, m) + // add extended verbs into interface + for _, v := range extendedMethods { + sw.Do(v.template+interfaceSuffix, v.args) + } + + } + sw.Do(interfaceTemplate4, m) + + if tags.NonNamespaced { + sw.Do(structNonNamespaced, m) + sw.Do(newStructNonNamespaced, m) + } else { + sw.Do(structNamespaced, m) + sw.Do(newStructNamespaced, m) + } + + if tags.NoVerbs { + return sw.Error() + } + + if tags.HasVerb("get") { + sw.Do(getTemplate, m) + } + if tags.HasVerb("list") { + sw.Do(listTemplate, m) + } + if tags.HasVerb("watch") { + sw.Do(watchTemplate, m) + } + + if tags.HasVerb("create") { + sw.Do(createTemplate, m) + } + if tags.HasVerb("update") { + sw.Do(updateTemplate, m) + } + if tags.HasVerb("updateStatus") { + sw.Do(updateStatusTemplate, m) + } + if tags.HasVerb("delete") { + sw.Do(deleteTemplate, m) + } + if tags.HasVerb("deleteCollection") { + sw.Do(deleteCollectionTemplate, m) + } + if tags.HasVerb("patch") { + sw.Do(patchTemplate, m) + } + + // generate expansion methods + for _, e := range tags.Extensions { + inputType := *t + resultType := *t + if len(e.InputTypeOverride) > 0 { + if name, pkg := e.Input(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + inputType = *newType + } else { + inputType.Name.Name = e.InputTypeOverride + } + } + if len(e.ResultTypeOverride) > 0 { + if name, pkg := e.Result(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + resultType = *newType + } else { + resultType.Name.Name = e.ResultTypeOverride + } + } + m["inputType"] = &inputType + m["resultType"] = &resultType + m["subresourcePath"] = e.SubResourcePath + + if e.HasVerb("get") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getTemplate), m) + } + } + + if e.HasVerb("list") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, listSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m) + } + } + + // TODO: Figure out schemantic for watching a sub-resource. 
+ if e.HasVerb("watch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchTemplate), m) + } + + if e.HasVerb("create") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createTemplate), m) + } + } + + if e.HasVerb("update") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateTemplate), m) + } + } + + // TODO: Figure out schemantic for deleting a sub-resource (what arguments + // are passed, does it need two names? etc. + if e.HasVerb("delete") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, deleteTemplate), m) + } + + if e.HasVerb("patch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m) + } + } + + return sw.Error() +} + +// adjustTemplate adjust the origin verb template using the expansion name. +// TODO: Make the verbs in templates parametrized so the strings.Replace() is +// not needed. +func adjustTemplate(name, verbType, template string) string { + return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1) +} + +func generateInterface(tags util.Tags) string { + // need an ordered list here to guarantee order of generated methods. + out := []string{} + for _, m := range util.SupportedVerbs { + if tags.HasVerb(m) { + out = append(out, defaultVerbTemplates[m]) + } + } + return strings.Join(out, "\n") +} + +var subresourceDefaultVerbTemplates = map[string]string{ + "create": `Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`, + "list": `List($.type|private$Name string, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, + "update": `Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`, + "get": `Get($.type|private$Name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, +} + +var defaultVerbTemplates = map[string]string{ + "create": `Create(*$.inputType|raw$) (*$.resultType|raw$, error)`, + "update": `Update(*$.inputType|raw$) (*$.resultType|raw$, error)`, + "updateStatus": `UpdateStatus(*$.type|raw$) (*$.type|raw$, error)`, + "delete": `Delete(name string, options *$.DeleteOptions|raw$) error`, + "deleteCollection": `DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error`, + "get": `Get(name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, + "list": `List(opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, + "watch": `Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error)`, + "patch": `Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error)`, +} + +// group client will implement this interface. +var getterComment = ` +// $.type|publicPlural$Getter has a method to return a $.type|public$Interface. +// A group's client should implement this interface.` + +var getterNamespaced = ` +type $.type|publicPlural$Getter interface { + $.type|publicPlural$(namespace string) $.type|public$Interface +} +` + +var getterNonNamespaced = ` +type $.type|publicPlural$Getter interface { + $.type|publicPlural$() $.type|public$Interface +} +` + +// this type's interface, typed client will implement this interface. +var interfaceTemplate1 = ` +// $.type|public$Interface has methods to work with $.type|public$ resources. 
+type $.type|public$Interface interface {` + +var interfaceTemplate4 = ` + $.type|public$Expansion +} +` + +// template for the struct that implements the type's interface +var structNamespaced = ` +// $.type|privatePlural$ implements $.type|public$Interface +type $.type|privatePlural$ struct { + client $.RESTClientInterface|raw$ + ns string +} +` + +// template for the struct that implements the type's interface +var structNonNamespaced = ` +// $.type|privatePlural$ implements $.type|public$Interface +type $.type|privatePlural$ struct { + client $.RESTClientInterface|raw$ +} +` + +var newStructNamespaced = ` +// new$.type|publicPlural$ returns a $.type|publicPlural$ +func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { + return &$.type|privatePlural${ + client: c.RESTClient(), + ns: namespace, + } +} +` + +var newStructNonNamespaced = ` +// new$.type|publicPlural$ returns a $.type|publicPlural$ +func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { + return &$.type|privatePlural${ + client: c.RESTClient(), + } +} +` +var listTemplate = ` +// List takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. +func (c *$.type|privatePlural$) List(opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &$.resultType|raw$List{} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). + Do(). + Into(result) + return +} +` + +var listSubresourceTemplate = ` +// List takes $.type|raw$ name, label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. +func (c *$.type|privatePlural$) List($.type|private$Name string, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &$.resultType|raw$List{} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). + Do(). + Into(result) + return +} +` + +var getTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. +func (c *$.type|privatePlural$) Get(name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name(name). + VersionedParams(&options, $.schemeParameterCodec|raw$). + Do(). + Into(result) + return +} +` + +var getSubresourceTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|raw$ object, and an error if there is any. +func (c *$.type|privatePlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + VersionedParams(&options, $.schemeParameterCodec|raw$). 
+ Do(). + Into(result) + return +} +` + +var deleteTemplate = ` +// Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. +func (c *$.type|privatePlural$) Delete(name string, options *$.DeleteOptions|raw$) error { + return c.client.Delete(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name(name). + Body(options). + Do(). + Error() +} +` + +var deleteCollectionTemplate = ` +// DeleteCollection deletes a collection of objects. +func (c *$.type|privatePlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil{ + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + VersionedParams(&listOptions, $.schemeParameterCodec|raw$). + Timeout(timeout). + Body(options). + Do(). + Error() +} +` + +var createSubresourceTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Post(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + Body($.inputType|private$). + Do(). + Into(result) + return +} +` + +var createTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Create($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Post(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Body($.inputType|private$). + Do(). + Into(result) + return +} +` + +var updateSubresourceTemplate = ` +// Update takes the top resource name and the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Put(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + Body($.inputType|private$). + Do(). + Into(result) + return +} +` + +var updateTemplate = ` +// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Update($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Put(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.inputType|private$.Name). + Body($.inputType|private$). + Do(). + Into(result) + return +} +` + +var updateStatusTemplate = ` +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *$.type|privatePlural$) UpdateStatus($.type|private$ *$.type|raw$) (result *$.type|raw$, err error) { + result = &$.type|raw${} + err = c.client.Put(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$.Name). + SubResource("status"). + Body($.type|private$). + Do(). + Into(result) + return +} +` + +var watchTemplate = ` +// Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. +func (c *$.type|privatePlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). + Watch() +} +` + +var patchTemplate = ` +// Patch applies the patch and returns the patched $.resultType|private$. +func (c *$.type|privatePlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Patch(pt). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go new file mode 100644 index 00000000..a698a28b --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go @@ -0,0 +1,186 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/code-generator/cmd/client-gen/path" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// GenScheme produces a package for a clientset with the scheme, codecs and parameter codecs. +type GenScheme struct { + generator.DefaultGen + OutputPackage string + Groups []clientgentypes.GroupVersions + GroupGoNames map[clientgentypes.GroupVersion]string + InputPackages map[clientgentypes.GroupVersion]string + OutputPath string + ImportTracker namer.ImportTracker + PrivateScheme bool + CreateRegistry bool + schemeGenerated bool +} + +func (g *GenScheme) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.OutputPackage, g.ImportTracker), + } +} + +// We only want to call GenerateType() once. +func (g *GenScheme) Filter(c *generator.Context, t *types.Type) bool { + ret := !g.schemeGenerated + g.schemeGenerated = true + return ret +} + +func (g *GenScheme) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.ImportTracker.ImportLines()...) 
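+	// Illustrative expectation, not upstream text: for a versioned clientset the
+	// loop below is expected to emit aliased imports such as
+	//   appsv1 "k8s.io/api/apps/v1"
+	// while with CreateRegistry (internal clientsets) it imports the group's
+	// install package instead, e.g.
+	//   apps "k8s.io/kubernetes/pkg/apis/apps/install"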
+ for _, group := range g.Groups { + for _, version := range group.Versions { + packagePath := g.InputPackages[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}] + groupAlias := strings.ToLower(g.GroupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) + if g.CreateRegistry { + // import the install package for internal clientsets instead of the type package with register.go + if version.Version != "" { + packagePath = filepath.Dir(packagePath) + } + packagePath = filepath.Join(packagePath, "install") + + imports = append(imports, fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath))) + break + } else { + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.Version.NonEmpty()), path.Vendorless(packagePath))) + } + } + } + return +} + +func (g *GenScheme) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + allGroupVersions := clientgentypes.ToGroupVersionInfo(g.Groups, g.GroupGoNames) + allInstallGroups := clientgentypes.ToGroupInstallPackages(g.Groups, g.GroupGoNames) + + m := map[string]interface{}{ + "allGroupVersions": allGroupVersions, + "allInstallGroups": allInstallGroups, + "customRegister": false, + "runtimeNewParameterCodec": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "NewParameterCodec"}), + "runtimeNewScheme": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "NewScheme"}), + "serializerNewCodecFactory": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/serializer", Name: "NewCodecFactory"}), + "runtimeScheme": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Scheme"}), + "runtimeSchemeBuilder": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "SchemeBuilder"}), + "runtimeUtilMust": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/util/runtime", Name: "Must"}), + "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), + "metav1AddToGroupVersion": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "AddToGroupVersion"}), + } + globals := map[string]string{ + "Scheme": "Scheme", + "Codecs": "Codecs", + "ParameterCodec": "ParameterCodec", + "Registry": "Registry", + } + for k, v := range globals { + if g.PrivateScheme { + m[k] = strings.ToLower(v[0:1]) + v[1:] + } else { + m[k] = v + } + } + + sw.Do(globalsTemplate, m) + + if g.OutputPath != "" { + if _, err := os.Stat(filepath.Join(g.OutputPath, strings.ToLower("register_custom.go"))); err == nil { + m["customRegister"] = true + } + } + + if g.CreateRegistry { + sw.Do(registryRegistration, m) + } else { + sw.Do(simpleRegistration, m) + } + + return sw.Error() +} + +var globalsTemplate = ` +var $.Scheme$ = $.runtimeNewScheme|raw$() +var $.Codecs$ = $.serializerNewCodecFactory|raw$($.Scheme$) +var $.ParameterCodec$ = $.runtimeNewParameterCodec|raw$($.Scheme$)` + +var registryRegistration = ` + +func init() { + $.metav1AddToGroupVersion|raw$($.Scheme$, $.schemaGroupVersion|raw${Version: "v1"}) + Install($.Scheme$) +} + +// Install registers the API group and adds types to a scheme +func Install(scheme *$.runtimeScheme|raw$) { + $- range .allInstallGroups$ + $.InstallPackageAlias$.Install(scheme) + $- end$ + $if .customRegister$ + ExtraInstall(scheme) + $end -$ +} +` + +var simpleRegistration = ` 
+var localSchemeBuilder = $.runtimeSchemeBuilder|raw${ + $- range .allGroupVersions$ + $.PackageAlias$.AddToScheme, + $- end$ + $if .customRegister$ + ExtraAddToScheme, + $end -$ +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + $.metav1AddToGroupVersion|raw$($.Scheme$, $.schemaGroupVersion|raw${Version: "v1"}) + $.runtimeUtilMust|raw$(AddToScheme($.Scheme$)) +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go new file mode 100644 index 00000000..0b7d68ca --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go @@ -0,0 +1,341 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "errors" + "fmt" + "strings" + + "k8s.io/gengo/types" +) + +var supportedTags = []string{ + "genclient", + "genclient:nonNamespaced", + "genclient:noVerbs", + "genclient:onlyVerbs", + "genclient:skipVerbs", + "genclient:noStatus", + "genclient:readonly", + "genclient:method", +} + +// SupportedVerbs is a list of supported verbs for +onlyVerbs and +skipVerbs. +var SupportedVerbs = []string{ + "create", + "update", + "updateStatus", + "delete", + "deleteCollection", + "get", + "list", + "watch", + "patch", +} + +// ReadonlyVerbs represents a list of read-only verbs. +var ReadonlyVerbs = []string{ + "get", + "list", + "watch", +} + +// genClientPrefix is the default prefix for all genclient tags. +const genClientPrefix = "genclient:" + +// unsupportedExtensionVerbs is a list of verbs we don't support generating +// extension client functions for. +var unsupportedExtensionVerbs = []string{ + "updateStatus", + "deleteCollection", + "watch", + "delete", +} + +// inputTypeSupportedVerbs is a list of verb types that supports overriding the +// input argument type. +var inputTypeSupportedVerbs = []string{ + "create", + "update", +} + +// resultTypeSupportedVerbs is a list of verb types that supports overriding the +// resulting type. +var resultTypeSupportedVerbs = []string{ + "create", + "update", + "get", + "list", + "patch", +} + +// Extensions allows to extend the default set of client verbs +// (CRUD+watch+patch+list+deleteCollection) for a given type with custom defined +// verbs. Custom verbs can have custom input and result types and also allow to +// use a sub-resource in a request instead of top-level resource type. 
+// +// Example: +// +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +// type ReplicaSet struct { ... } +// +// The 'method=UpdateScale' is the name of the client function. +// The 'verb=update' here means the client function will use 'PUT' action. +// The 'subresource=scale' means we will use SubResource template to generate this client function. +// The 'input' is the input type used for creation (function argument). +// The 'result' (not needed in this case) is the result type returned from the +// client function. +// +type extension struct { + // VerbName is the name of the custom verb (Scale, Instantiate, etc..) + VerbName string + // VerbType is the type of the verb (only verbs from SupportedVerbs are + // supported) + VerbType string + // SubResourcePath defines a path to a sub-resource to use in the request. + // (optional) + SubResourcePath string + // InputTypeOverride overrides the input parameter type for the verb. By + // default the original type is used. Overriding the input type only works for + // "create" and "update" verb types. The given type must exists in the same + // package as the original type. + // (optional) + InputTypeOverride string + // ResultTypeOverride overrides the resulting object type for the verb. By + // default the original type is used. Overriding the result type works. + // (optional) + ResultTypeOverride string +} + +// IsSubresource indicates if this extension should generate the sub-resource. +func (e *extension) IsSubresource() bool { + return len(e.SubResourcePath) > 0 +} + +// HasVerb checks if the extension matches the given verb. +func (e *extension) HasVerb(verb string) bool { + return e.VerbType == verb +} + +// Input returns the input override package path and the type. +func (e *extension) Input() (string, string) { + parts := strings.Split(e.InputTypeOverride, ".") + return parts[len(parts)-1], strings.Join(parts[0:len(parts)-1], ".") +} + +// Result returns the result override package path and the type. +func (e *extension) Result() (string, string) { + parts := strings.Split(e.ResultTypeOverride, ".") + return parts[len(parts)-1], strings.Join(parts[0:len(parts)-1], ".") +} + +// Tags represents a genclient configuration for a single type. +type Tags struct { + // +genclient + GenerateClient bool + // +genclient:nonNamespaced + NonNamespaced bool + // +genclient:noStatus + NoStatus bool + // +genclient:noVerbs + NoVerbs bool + // +genclient:skipVerbs=get,update + // +genclient:onlyVerbs=create,delete + SkipVerbs []string + // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale + Extensions []extension +} + +// HasVerb returns true if we should include the given verb in final client interface and +// generate the function for it. +func (t Tags) HasVerb(verb string) bool { + if len(t.SkipVerbs) == 0 { + return true + } + for _, s := range t.SkipVerbs { + if verb == s { + return false + } + } + return true +} + +// MustParseClientGenTags calls ParseClientGenTags but instead of returning error it panics. +func MustParseClientGenTags(lines []string) Tags { + tags, err := ParseClientGenTags(lines) + if err != nil { + panic(err.Error()) + } + return tags +} + +// ParseClientGenTags parse the provided genclient tags and validates that no unknown +// tags are provided. 
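+//
+// For illustration (editorial sketch, not upstream text), a type annotated with
+//
+//   // +genclient
+//   // +genclient:nonNamespaced
+//   // +genclient:onlyVerbs=get,list,watch
+//
+// is expected to parse to roughly:
+//
+//   Tags{
+//     GenerateClient: true,
+//     NonNamespaced:  true,
+//     SkipVerbs:      []string{"create", "update", "updateStatus", "delete", "deleteCollection", "patch"},
+//   }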
+func ParseClientGenTags(lines []string) (Tags, error) { + ret := Tags{} + values := types.ExtractCommentTags("+", lines) + value := []string{} + value, ret.GenerateClient = values["genclient"] + // Check the old format and error when used to avoid generating client when //+genclient=false + if len(value) > 0 && len(value[0]) > 0 { + return ret, fmt.Errorf("+genclient=%s is invalid, use //+genclient if you want to generate client or omit it when you want to disable generation", value) + } + _, ret.NonNamespaced = values[genClientPrefix+"nonNamespaced"] + // Check the old format and error when used + if value := values["nonNamespaced"]; len(value) > 0 && len(value[0]) > 0 { + return ret, fmt.Errorf("+nonNamespaced=%s is invalid, use //+genclient:nonNamespaced instead", value[0]) + } + _, ret.NoVerbs = values[genClientPrefix+"noVerbs"] + _, ret.NoStatus = values[genClientPrefix+"noStatus"] + onlyVerbs := []string{} + if _, isReadonly := values[genClientPrefix+"readonly"]; isReadonly { + onlyVerbs = ReadonlyVerbs + } + // Check the old format and error when used + if value := values["readonly"]; len(value) > 0 && len(value[0]) > 0 { + return ret, fmt.Errorf("+readonly=%s is invalid, use //+genclient:readonly instead", value[0]) + } + if v, exists := values[genClientPrefix+"skipVerbs"]; exists { + ret.SkipVerbs = strings.Split(v[0], ",") + } + if v, exists := values[genClientPrefix+"onlyVerbs"]; exists || len(onlyVerbs) > 0 { + if len(v) > 0 { + onlyVerbs = append(onlyVerbs, strings.Split(v[0], ",")...) + } + skipVerbs := []string{} + for _, m := range SupportedVerbs { + skip := true + for _, o := range onlyVerbs { + if o == m { + skip = false + break + } + } + // Check for conflicts + for _, v := range skipVerbs { + if v == m { + return ret, fmt.Errorf("verb %q used both in genclient:skipVerbs and genclient:onlyVerbs", v) + } + } + if skip { + skipVerbs = append(skipVerbs, m) + } + } + ret.SkipVerbs = skipVerbs + } + var err error + if ret.Extensions, err = parseClientExtensions(values); err != nil { + return ret, err + } + return ret, validateClientGenTags(values) +} + +func parseClientExtensions(tags map[string][]string) ([]extension, error) { + var ret []extension + for name, values := range tags { + if !strings.HasPrefix(name, genClientPrefix+"method") { + continue + } + for _, value := range values { + // the value comes in this form: "Foo,verb=create" + ext := extension{} + parts := strings.Split(value, ",") + if len(parts) == 0 { + return nil, fmt.Errorf("invalid of empty extension verb name: %q", value) + } + // The first part represents the name of the extension + ext.VerbName = parts[0] + if len(ext.VerbName) == 0 { + return nil, fmt.Errorf("must specify a verb name (// +genclient:method=Foo,verb=create)") + } + // Parse rest of the arguments + params := parts[1:] + for _, p := range params { + parts := strings.Split(p, "=") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid extension tag specification %q", p) + } + key, val := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + if len(val) == 0 { + return nil, fmt.Errorf("empty value of %q for %q extension", key, ext.VerbName) + } + switch key { + case "verb": + ext.VerbType = val + case "subresource": + ext.SubResourcePath = val + case "input": + ext.InputTypeOverride = val + case "result": + ext.ResultTypeOverride = val + default: + return nil, fmt.Errorf("unknown extension configuration key %q", key) + } + } + // Validate resulting extension configuration + if len(ext.VerbType) == 0 { + return nil, fmt.Errorf("verb 
type must be specified (use '// +genclient:method=%s,verb=create')", ext.VerbName) + } + if len(ext.ResultTypeOverride) > 0 { + supported := false + for _, v := range resultTypeSupportedVerbs { + if ext.VerbType == v { + supported = true + break + } + } + if !supported { + return nil, fmt.Errorf("%s: result type is not supported for %q verbs (supported verbs: %#v)", ext.VerbName, ext.VerbType, resultTypeSupportedVerbs) + } + } + if len(ext.InputTypeOverride) > 0 { + supported := false + for _, v := range inputTypeSupportedVerbs { + if ext.VerbType == v { + supported = true + break + } + } + if !supported { + return nil, fmt.Errorf("%s: input type is not supported for %q verbs (supported verbs: %#v)", ext.VerbName, ext.VerbType, inputTypeSupportedVerbs) + } + } + for _, t := range unsupportedExtensionVerbs { + if ext.VerbType == t { + return nil, fmt.Errorf("verb %q is not supported by extension generator", ext.VerbType) + } + } + ret = append(ret, ext) + } + } + return ret, nil +} + +// validateTags validates that only supported genclient tags were provided. +func validateClientGenTags(values map[string][]string) error { + for _, k := range supportedTags { + delete(values, k) + } + for key := range values { + if strings.HasPrefix(key, strings.TrimSuffix(genClientPrefix, ":")) { + return errors.New("unknown tag detected: " + key) + } + } + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/main.go b/vendor/k8s.io/code-generator/cmd/client-gen/main.go new file mode 100644 index 00000000..6e0d187f --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/main.go @@ -0,0 +1,66 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// client-gen makes the individual typed clients using gengo. +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/generators" + "k8s.io/code-generator/pkg/util" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. 
+ // TODO: move this out of client-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/clientset_generated/" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + // add group version package as input dirs for gengo + for _, pkg := range customArgs.Groups { + for _, v := range pkg.Versions { + genericArgs.InputDirs = append(genericArgs.InputDirs, v.Package) + } + } + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go b/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go new file mode 100644 index 00000000..19b269bd --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go @@ -0,0 +1,31 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path + +import "strings" + +// Vendorless removes the longest match of "*/vendor/" from the front of p. +// It is useful if a package locates in vendor/, e.g., +// k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1, because gengo +// indexes the package with its import path, e.g., +// k8s.io/apimachinery/pkg/apis/meta/v1, +func Vendorless(p string) string { + if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { + return p[pos+len("/vendor/"):] + } + return p +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go new file mode 100644 index 00000000..59f2fd44 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -0,0 +1,121 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "k8s.io/gengo/namer" +) + +// ToGroupVersion turns "group/version" string into a GroupVersion struct. It reports error +// if it cannot parse the string. 
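+//
+// For illustration (not upstream text):
+//
+//   ToGroupVersion("apps/v1") // -> GroupVersion{Group: "apps", Version: "v1"}
+//   ToGroupVersion("")        // -> GroupVersion{}, the legacy internal version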
+func ToGroupVersion(gv string) (GroupVersion, error) { + // this can be the internal version for the legacy kube types + // TODO once we've cleared the last uses as strings, this special case should be removed. + if (len(gv) == 0) || (gv == "/") { + return GroupVersion{}, nil + } + + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{Group(gv), ""}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{Group(gv[:i]), Version(gv[i+1:])}, nil + default: + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) + } +} + +type sortableSliceOfVersions []string + +func (a sortableSliceOfVersions) Len() int { return len(a) } +func (a sortableSliceOfVersions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a sortableSliceOfVersions) Less(i, j int) bool { + vi, vj := strings.TrimLeft(a[i], "v"), strings.TrimLeft(a[j], "v") + major := regexp.MustCompile("^[0-9]+") + viMajor, vjMajor := major.FindString(vi), major.FindString(vj) + viRemaining, vjRemaining := strings.TrimLeft(vi, viMajor), strings.TrimLeft(vj, vjMajor) + switch { + case len(viRemaining) == 0 && len(vjRemaining) == 0: + return viMajor < vjMajor + case len(viRemaining) == 0 && len(vjRemaining) != 0: + // stable version is greater than unstable version + return false + case len(viRemaining) != 0 && len(vjRemaining) == 0: + // stable version is greater than unstable version + return true + } + // neither are stable versions + if viMajor != vjMajor { + return viMajor < vjMajor + } + // assuming at most we have one alpha or one beta version, so if vi contains "alpha", it's the lesser one. + return strings.Contains(viRemaining, "alpha") +} + +// Determine the default version among versions. If a user calls a group client +// without specifying the version (e.g., c.CoreV1(), instead of c.CoreV1()), the +// default version will be returned. +func defaultVersion(versions []PackageVersion) Version { + var versionStrings []string + for _, version := range versions { + versionStrings = append(versionStrings, version.Version.String()) + } + sort.Sort(sortableSliceOfVersions(versionStrings)) + return Version(versionStrings[len(versionStrings)-1]) +} + +// ToGroupVersionInfo is a helper function used by generators for groups. 
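+//
+// Illustrative sketch (not upstream text): for group "apps", version "v1" and
+// group Go name "Apps", the resulting entry is expected to look roughly like
+//
+//   GroupVersionInfo{
+//     Group:                "Apps",
+//     Version:              "V1",
+//     PackageAlias:         "appsv1",
+//     GroupGoName:          "Apps",
+//     LowerCaseGroupGoName: "apps",
+//   }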
+func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo { + var groupVersionPackages []GroupVersionInfo + for _, group := range groups { + for _, version := range group.Versions { + groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}] + groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{ + Group: Group(namer.IC(group.Group.NonEmpty())), + Version: Version(namer.IC(version.Version.String())), + PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()), + GroupGoName: groupGoName, + LowerCaseGroupGoName: namer.IL(groupGoName), + }) + } + } + return groupVersionPackages +} + +func ToGroupInstallPackages(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupInstallPackage { + var groupInstallPackages []GroupInstallPackage + for _, group := range groups { + defaultVersion := defaultVersion(group.Versions) + groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: defaultVersion}] + groupInstallPackages = append(groupInstallPackages, GroupInstallPackage{ + Group: Group(namer.IC(group.Group.NonEmpty())), + InstallPackageAlias: strings.ToLower(groupGoName), + }) + } + return groupInstallPackages +} + +// NormalizeGroupVersion calls normalizes the GroupVersion. +//func NormalizeGroupVersion(gv GroupVersion) GroupVersion { +// return GroupVersion{Group: gv.Group.NonEmpty(), Version: gv.Version, NonEmptyVersion: normalization.Version(gv.Version)} +//} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go new file mode 100644 index 00000000..7d1606c5 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +type Version string + +func (v Version) String() string { + return string(v) +} + +func (v Version) NonEmpty() string { + if v == "" { + return "internalVersion" + } + return v.String() +} + +type Group string + +func (g Group) String() string { + return string(g) +} + +func (g Group) NonEmpty() string { + if g == "api" { + return "core" + } + return string(g) +} + +type PackageVersion struct { + Version + // The fully qualified package, e.g. k8s.io/kubernetes/pkg/apis/apps, where the types.go is found. + Package string +} + +type GroupVersion struct { + Group Group + Version Version +} + +type GroupVersions struct { + // The name of the package for this group, e.g. apps. + PackageName string + Group Group + Versions []PackageVersion +} + +// GroupVersionInfo contains all the info around a group version. 
+type GroupVersionInfo struct { + Group Group + Version Version + PackageAlias string + GroupGoName string + LowerCaseGroupGoName string +} + +type GroupInstallPackage struct { + Group Group + InstallPackageAlias string +} diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go new file mode 100644 index 00000000..07ce6e72 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" +) + +// DefaultBasePeerDirs are the peer-dirs nearly everybody will use, i.e. those coming from +// apimachinery. +var DefaultBasePeerDirs = []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/conversion", + "k8s.io/apimachinery/pkg/runtime", +} + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct { + // Base peer dirs which nearly everybody will use, i.e. outside of Kubernetes core. Peer dirs + // are declared to make the generator pick up manually written conversion funcs from external + // packages. + BasePeerDirs []string + + // Custom peer dirs which are application specific. Peer dirs are declared to make the + // generator pick up manually written conversion funcs from external packages. + ExtraPeerDirs []string + + // SkipUnsafe indicates whether to generate unsafe conversions to improve the efficiency + // of these operations. The unsafe operation is a direct pointer assignment via unsafe + // (within the allowed uses of unsafe) and is equivalent to a proposed Golang change to + // allow structs that are identical to be assigned to each other. + SkipUnsafe bool +} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + BasePeerDirs: DefaultBasePeerDirs, + SkipUnsafe: false, + } + genericArgs.CustomArgs = customArgs + genericArgs.OutputFileBaseName = "conversion_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.BasePeerDirs, "base-peer-dirs", ca.BasePeerDirs, + "Comma-separated list of apimachinery import paths which are considered, after tag-specified peers, for conversions. 
Only change these if you have very good reasons.") + pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs, + "Application specific comma-separated list of import paths which are considered, after tag-specified peers and base-peer-dirs, for conversions.") + pflag.CommandLine.BoolVar(&ca.SkipUnsafe, "skip-unsafe", ca.SkipUnsafe, + "If true, will not generate code using unsafe pointer conversions; resulting code may be slower.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go new file mode 100644 index 00000000..775972d1 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -0,0 +1,984 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "sort" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" + + conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" +) + +// These are the comment tags that carry parameters for conversion generation. +const ( + // e.g., "+k8s:conversion-gen=" in doc.go, where is the + // import path of the package the peer types are defined in. + // e.g., "+k8s:conversion-gen=false" in a type's comment will let + // conversion-gen skip that type. + tagName = "k8s:conversion-gen" + // e.g., "+k8s:conversion-gen-external-types=" in doc.go, where + // is the relative path to the package the types are defined in. + externalTypesTagName = "k8s:conversion-gen-external-types" +) + +func extractTag(comments []string) []string { + return types.ExtractCommentTags("+", comments)[tagName] +} + +func extractExternalTypesTag(comments []string) []string { + return types.ExtractCommentTags("+", comments)[externalTypesTagName] +} + +func isCopyOnly(comments []string) bool { + values := types.ExtractCommentTags("+", comments)["k8s:conversion-fn"] + return len(values) == 1 && values[0] == "copy-only" +} + +func isDrop(comments []string) bool { + values := types.ExtractCommentTags("+", comments)["k8s:conversion-fn"] + return len(values) == 1 && values[0] == "drop" +} + +// TODO: This is created only to reduce number of changes in a single PR. +// Remove it and use PublicNamer instead. 
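+//
+// Illustrative note (not upstream text): this namer joins the last package
+// segment and the type name with "_", so k8s.io/api/core/v1.Pod is named
+// "v1_Pod" and the internal core Pod is named "core_Pod", giving conversion
+// function names like Convert_v1_Pod_To_core_Pod.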
+func conversionNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Join: func(pre string, in []string, post string) string { + return strings.Join(in, "_") + }, + PrependPackageNames: 1, + } +} + +func defaultFnNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Prefix: "SetDefaults_", + Join: func(pre string, in []string, post string) string { + return pre + strings.Join(in, "_") + post + }, + } +} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": conversionNamer(), + "raw": namer.NewRawNamer("", nil), + "defaultfn": defaultFnNamer(), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +func getPeerTypeFor(context *generator.Context, t *types.Type, potenialPeerPkgs []string) *types.Type { + for _, ppp := range potenialPeerPkgs { + p := context.Universe.Package(ppp) + if p == nil { + continue + } + if p.Has(t.Name.Name) { + return p.Type(t.Name.Name) + } + } + return nil +} + +type conversionPair struct { + inType *types.Type + outType *types.Type +} + +// All of the types in conversions map are of type "DeclarationOf" with +// the underlying type being "Func". +type conversionFuncMap map[conversionPair]*types.Type + +// Returns all manually-defined conversion functions in the package. +func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) { + if pkg == nil { + klog.Warningf("Skipping nil package passed to getManualConversionFunctions") + return + } + klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) + + scopeName := types.Ref(conversionPackagePath, "Scope").Name + errorName := types.Ref("", "error").Name + buffer := &bytes.Buffer{} + sw := generator.NewSnippetWriter(buffer, context, "$", "$") + + for _, f := range pkg.Functions { + if f.Underlying == nil || f.Underlying.Kind != types.Func { + klog.Errorf("Malformed function: %#v", f) + continue + } + if f.Underlying.Signature == nil { + klog.Errorf("Function without signature: %#v", f) + continue + } + klog.V(8).Infof("Considering function %s", f.Name) + signature := f.Underlying.Signature + // Check whether the function is conversion function. + // Note that all of them have signature: + // func Convert_inType_To_outType(inType, outType, conversion.Scope) error + if signature.Receiver != nil { + klog.V(8).Infof("%s has a receiver", f.Name) + continue + } + if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { + klog.V(8).Infof("%s has wrong parameters", f.Name) + continue + } + if len(signature.Results) != 1 || signature.Results[0].Name != errorName { + klog.V(8).Infof("%s has wrong results", f.Name) + continue + } + inType := signature.Parameters[0] + outType := signature.Parameters[1] + if inType.Kind != types.Pointer || outType.Kind != types.Pointer { + klog.V(8).Infof("%s has wrong parameter types", f.Name) + continue + } + // Now check if the name satisfies the convention. + // TODO: This should call the Namer directly. + args := argsFromType(inType.Elem, outType.Elem) + sw.Do("Convert_$.inType|public$_To_$.outType|public$", args) + if f.Name.Name == buffer.String() { + klog.V(4).Infof("Found conversion function %s", f.Name) + key := conversionPair{inType.Elem, outType.Elem} + // We might scan the same package twice, and that's OK. 
+ if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path { + panic(fmt.Sprintf("duplicate static conversion defined: %s -> %s from:\n%s.%s\n%s.%s", key.inType, key.outType, v.Name.Package, v.Name.Name, f.Name.Package, f.Name.Name)) + } + manualMap[key] = f + } else { + // prevent user error when they don't get the correct conversion signature + if strings.HasPrefix(f.Name.Name, "Convert_") { + klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) + } + klog.V(8).Infof("%s has wrong name", f.Name) + } + buffer.Reset() + } +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + packages := generator.Packages{} + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + + // Accumulate pre-existing conversion functions. + // TODO: This is too ad-hoc. We need a better way. + manualConversions := conversionFuncMap{} + + // Record types that are memory equivalent. A type is memory equivalent + // if it has the same memory layout and no nested manual conversion is + // defined. + // TODO: in the future, relax the nested manual conversion requirement + // if we can show that a large enough types are memory identical but + // have non-trivial conversion + memoryEquivalentTypes := equalMemoryTypes{} + + // We are generating conversions only for packages that are explicitly + // passed as InputDir. + processed := map[string]bool{} + for _, i := range context.Inputs { + // skip duplicates + if processed[i] { + continue + } + processed[i] = true + + klog.V(5).Infof("considering pkg %q", i) + pkg := context.Universe[i] + // typesPkg is where the versioned types are defined. Sometimes it is + // different from pkg. For example, kubernetes core/v1 types are defined + // in vendor/k8s.io/api/core/v1, while pkg is at pkg/api/v1. + typesPkg := pkg + if pkg == nil { + // If the input had no Go files, for example. + continue + } + + // Add conversion and defaulting functions. + getManualConversionFunctions(context, pkg, manualConversions) + + // Only generate conversions for packages which explicitly request it + // by specifying one or more "+k8s:conversion-gen=" + // in their doc.go file. + peerPkgs := extractTag(pkg.Comments) + if peerPkgs != nil { + klog.V(5).Infof(" tags: %q", peerPkgs) + } else { + klog.V(5).Infof(" no tag") + continue + } + skipUnsafe := false + if customArgs, ok := arguments.CustomArgs.(*conversionargs.CustomArgs); ok { + peerPkgs = append(peerPkgs, customArgs.BasePeerDirs...) + peerPkgs = append(peerPkgs, customArgs.ExtraPeerDirs...) 
+ skipUnsafe = customArgs.SkipUnsafe + } + + // if the external types are not in the same package where the conversion functions to be generated + externalTypesValues := extractExternalTypesTag(pkg.Comments) + if externalTypesValues != nil { + if len(externalTypesValues) != 1 { + klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) + } + externalTypes := externalTypesValues[0] + klog.V(5).Infof(" external types tags: %q", externalTypes) + var err error + typesPkg, err = context.AddDirectory(externalTypes) + if err != nil { + klog.Fatalf("cannot import package %s", externalTypes) + } + // update context.Order to the latest context.Universe + orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} + context.Order = orderer.OrderUniverse(context.Universe) + } + + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. + vendorless := func(pkg string) string { + if pos := strings.LastIndex(pkg, "/vendor/"); pos != -1 { + return pkg[pos+len("/vendor/"):] + } + return pkg + } + for i := range peerPkgs { + peerPkgs[i] = vendorless(peerPkgs[i]) + } + + // Make sure our peer-packages are added and fully parsed. + for _, pp := range peerPkgs { + context.AddDir(pp) + p := context.Universe[pp] + if nil == p { + klog.Fatalf("failed to find pkg: %s", pp) + } + getManualConversionFunctions(context, p, manualConversions) + } + + unsafeEquality := TypesEqual(memoryEquivalentTypes) + if skipUnsafe { + unsafeEquality = noEquality{} + } + + path := pkg.Path + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. + if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { + expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) + if strings.Contains(expandedPath, "/vendor/") { + path = expandedPath + } + } + packages = append(packages, + &generator.DefaultPackage{ + PackageName: filepath.Base(pkg.Path), + PackagePath: path, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenConversion(arguments.OutputFileBaseName, typesPkg.Path, pkg.Path, manualConversions, peerPkgs, unsafeEquality), + } + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return t.Name.Package == typesPkg.Path + }, + }) + } + + // If there is a manual conversion defined between two types, exclude it + // from being a candidate for unsafe conversion + for k, v := range manualConversions { + if isCopyOnly(v.CommentLines) { + klog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) + continue + } + // this type should be excluded from all equivalence, because the converter must be called. 
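+ // For example (hypothetical types): once a manual Convert_v1_Widget_To_core_Widget
+ // exists, the Widget pair may no longer be converted with an unsafe cast even if
+ // both structs happen to share the same memory layout, because that would bypass
+ // the hand-written logic.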
+ memoryEquivalentTypes.Skip(k.inType, k.outType) + } + + return packages +} + +type equalMemoryTypes map[conversionPair]bool + +func (e equalMemoryTypes) Skip(a, b *types.Type) { + e[conversionPair{a, b}] = false + e[conversionPair{b, a}] = false +} + +func (e equalMemoryTypes) Equal(a, b *types.Type) bool { + // alreadyVisitedTypes holds all the types that have already been checked in the structural type recursion. + alreadyVisitedTypes := make(map[*types.Type]bool) + return e.cachingEqual(a, b, alreadyVisitedTypes) +} + +func (e equalMemoryTypes) cachingEqual(a, b *types.Type, alreadyVisitedTypes map[*types.Type]bool) bool { + if a == b { + return true + } + if equal, ok := e[conversionPair{a, b}]; ok { + return equal + } + if equal, ok := e[conversionPair{b, a}]; ok { + return equal + } + result := e.equal(a, b, alreadyVisitedTypes) + e[conversionPair{a, b}] = result + e[conversionPair{b, a}] = result + return result +} + +func (e equalMemoryTypes) equal(a, b *types.Type, alreadyVisitedTypes map[*types.Type]bool) bool { + in, out := unwrapAlias(a), unwrapAlias(b) + switch { + case in == out: + return true + case in.Kind == out.Kind: + // if the type exists already, return early to avoid recursion + if alreadyVisitedTypes[in] { + return true + } + alreadyVisitedTypes[in] = true + + switch in.Kind { + case types.Struct: + if len(in.Members) != len(out.Members) { + return false + } + for i, inMember := range in.Members { + outMember := out.Members[i] + if !e.cachingEqual(inMember.Type, outMember.Type, alreadyVisitedTypes) { + return false + } + } + return true + case types.Pointer: + return e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) + case types.Map: + return e.cachingEqual(in.Key, out.Key, alreadyVisitedTypes) && e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) + case types.Slice: + return e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) + case types.Interface: + // TODO: determine whether the interfaces are actually equivalent - for now, they must have the + // same type. + return false + case types.Builtin: + return in.Name.Name == out.Name.Name + } + } + return false +} + +func findMember(t *types.Type, name string) (types.Member, bool) { + if t.Kind != types.Struct { + return types.Member{}, false + } + for _, member := range t.Members { + if member.Name == name { + return member, true + } + } + return types.Member{}, false +} + +// unwrapAlias recurses down aliased types to find the bedrock type. +func unwrapAlias(in *types.Type) *types.Type { + for in.Kind == types.Alias { + in = in.Underlying + } + return in +} + +const ( + runtimePackagePath = "k8s.io/apimachinery/pkg/runtime" + conversionPackagePath = "k8s.io/apimachinery/pkg/conversion" +) + +type noEquality struct{} + +func (noEquality) Equal(_, _ *types.Type) bool { return false } + +type TypesEqual interface { + Equal(a, b *types.Type) bool +} + +// genConversion produces a file with a autogenerated conversions. 
+type genConversion struct { + generator.DefaultGen + // the package that contains the types that conversion func are going to be + // generated for + typesPackage string + // the package that the conversion funcs are going to be output to + outputPackage string + // packages that contain the peer of types in typesPacakge + peerPackages []string + manualConversions conversionFuncMap + imports namer.ImportTracker + types []*types.Type + skippedFields map[*types.Type][]string + useUnsafe TypesEqual +} + +func NewGenConversion(sanitizedName, typesPackage, outputPackage string, manualConversions conversionFuncMap, peerPkgs []string, useUnsafe TypesEqual) generator.Generator { + return &genConversion{ + DefaultGen: generator.DefaultGen{ + OptionalName: sanitizedName, + }, + typesPackage: typesPackage, + outputPackage: outputPackage, + peerPackages: peerPkgs, + manualConversions: manualConversions, + imports: generator.NewImportTracker(), + types: []*types.Type{}, + skippedFields: map[*types.Type][]string{}, + useUnsafe: useUnsafe, + } +} + +func (g *genConversion) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "publicIT": &namerPlusImportTracking{ + delegate: conversionNamer(), + tracker: g.imports, + }, + } +} + +type namerPlusImportTracking struct { + delegate namer.Namer + tracker namer.ImportTracker +} + +func (n *namerPlusImportTracking) Name(t *types.Type) string { + n.tracker.AddType(t) + return n.delegate.Name(t) +} + +func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type) bool { + var t *types.Type + var other *types.Type + if inType.Name.Package == g.typesPackage { + t, other = inType, outType + } else { + t, other = outType, inType + } + + if t.Name.Package != g.typesPackage { + return false + } + // If the type has opted out, skip it. + tagvals := extractTag(t.CommentLines) + if tagvals != nil { + if tagvals[0] != "false" { + klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) + } + klog.V(5).Infof("type %v requests no conversion generation, skipping", t) + return false + } + // TODO: Consider generating functions for other kinds too. + if t.Kind != types.Struct { + return false + } + // Also, filter out private types. 
+ if namer.IsPrivateGoName(other.Name.Name) { + return false + } + return true +} + +func (g *genConversion) Filter(c *generator.Context, t *types.Type) bool { + peerType := getPeerTypeFor(c, t, g.peerPackages) + if peerType == nil { + return false + } + if !g.convertibleOnlyWithinPackage(t, peerType) { + return false + } + + g.types = append(g.types, t) + return true +} + +func (g *genConversion) isOtherPackage(pkg string) bool { + if pkg == g.outputPackage { + return false + } + if strings.HasSuffix(pkg, `"`+g.outputPackage+`"`) { + return false + } + return true +} + +func (g *genConversion) Imports(c *generator.Context) (imports []string) { + var importLines []string + for _, singleImport := range g.imports.ImportLines() { + if g.isOtherPackage(singleImport) { + importLines = append(importLines, singleImport) + } + } + return importLines +} + +func argsFromType(inType, outType *types.Type) generator.Args { + return generator.Args{ + "inType": inType, + "outType": outType, + } +} + +const nameTmpl = "Convert_$.inType|publicIT$_To_$.outType|publicIT$" + +func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, bool) { + function, ok := g.manualConversions[conversionPair{inType, outType}] + return function, ok +} + +func (g *genConversion) Init(c *generator.Context, w io.Writer) error { + if klog.V(5) { + if m, ok := g.useUnsafe.(equalMemoryTypes); ok { + var result []string + klog.Infof("All objects without identical memory layout:") + for k, v := range m { + if v { + continue + } + result = append(result, fmt.Sprintf(" %s -> %s = %t", k.inType, k.outType, v)) + } + sort.Strings(result) + for _, s := range result { + klog.Infof(s) + } + } + } + sw := generator.NewSnippetWriter(w, c, "$", "$") + sw.Do("func init() {\n", nil) + sw.Do("localSchemeBuilder.Register(RegisterConversions)\n", nil) + sw.Do("}\n", nil) + + scheme := c.Universe.Type(types.Name{Package: runtimePackagePath, Name: "Scheme"}) + schemePtr := &types.Type{ + Kind: types.Pointer, + Elem: scheme, + } + sw.Do("// RegisterConversions adds conversion functions to the given scheme.\n", nil) + sw.Do("// Public to allow building arbitrary schemes.\n", nil) + sw.Do("func RegisterConversions(s $.|raw$) error {\n", schemePtr) + for _, t := range g.types { + peerType := getPeerTypeFor(c, t, g.peerPackages) + args := argsFromType(t, peerType).With("Scope", types.Ref(conversionPackagePath, "Scope")) + sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) + args = argsFromType(peerType, t).With("Scope", types.Ref(conversionPackagePath, "Scope")) + sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) + } + var pairs []conversionPair + for pair, t := range g.manualConversions { + if t.Name.Package != g.outputPackage { + continue + } + pairs = append(pairs, pair) + } + // sort by name of the conversion function + sort.Slice(pairs, func(i, j int) bool { + if g.manualConversions[pairs[i]].Name.Name < g.manualConversions[pairs[j]].Name.Name { + return true + } + return false + }) + for _, pair := range pairs { + args := argsFromType(pair.inType, pair.outType).With("Scope", types.Ref(conversionPackagePath, "Scope")).With("fn", 
g.manualConversions[pair]) + sw.Do("if err := s.AddConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return $.fn|raw$(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) + } + + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + return sw.Error() +} + +func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + klog.V(5).Infof("generating for type %v", t) + peerType := getPeerTypeFor(c, t, g.peerPackages) + sw := generator.NewSnippetWriter(w, c, "$", "$") + g.generateConversion(t, peerType, sw) + g.generateConversion(peerType, t, sw) + return sw.Error() +} + +func (g *genConversion) generateConversion(inType, outType *types.Type, sw *generator.SnippetWriter) { + args := argsFromType(inType, outType). + With("Scope", types.Ref(conversionPackagePath, "Scope")) + + sw.Do("func auto"+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) + g.generateFor(inType, outType, sw) + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + + if _, found := g.preexists(inType, outType); found { + // There is a public manual Conversion method: use it. + } else if skipped := g.skippedFields[inType]; len(skipped) != 0 { + // The inType had some fields we could not generate. + klog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) + klog.Errorf(" the following fields need manual conversion:") + for _, f := range skipped { + klog.Errorf(" - %v", f) + } + } else { + // Emit a public conversion function. + sw.Do("// "+nameTmpl+" is an autogenerated conversion function.\n", args) + sw.Do("func "+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) + sw.Do("return auto"+nameTmpl+"(in, out, s)\n", args) + sw.Do("}\n\n", nil) + } +} + +// we use the system of shadowing 'in' and 'out' so that the same code is valid +// at any nesting level. This makes the autogenerator easy to understand, and +// the compiler shouldn't care. 
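+//
+// For example, for a hypothetical pointer field Spec the emitted code is shaped
+// roughly like
+//
+//   if in.Spec != nil {
+//       in, out := &in.Spec, &out.Spec
+//       *out = new(WidgetSpec)
+//       **out = **in
+//   } else {
+//       out.Spec = nil
+//   }
+//
+// where the inner statements are produced by the same helpers (doPointer here)
+// that handle the top level.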
+func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) { + klog.V(5).Infof("generating %v -> %v", inType, outType) + var f func(*types.Type, *types.Type, *generator.SnippetWriter) + + switch inType.Kind { + case types.Builtin: + f = g.doBuiltin + case types.Map: + f = g.doMap + case types.Slice: + f = g.doSlice + case types.Struct: + f = g.doStruct + case types.Pointer: + f = g.doPointer + case types.Alias: + f = g.doAlias + default: + f = g.doUnknown + } + + f(inType, outType, sw) +} + +func (g *genConversion) doBuiltin(inType, outType *types.Type, sw *generator.SnippetWriter) { + if inType == outType { + sw.Do("*out = *in\n", nil) + } else { + sw.Do("*out = $.|raw$(*in)\n", outType) + } +} + +func (g *genConversion) doMap(inType, outType *types.Type, sw *generator.SnippetWriter) { + sw.Do("*out = make($.|raw$, len(*in))\n", outType) + if isDirectlyAssignable(inType.Key, outType.Key) { + sw.Do("for key, val := range *in {\n", nil) + if isDirectlyAssignable(inType.Elem, outType.Elem) { + if inType.Key == outType.Key { + sw.Do("(*out)[key] = ", nil) + } else { + sw.Do("(*out)[$.|raw$(key)] = ", outType.Key) + } + if inType.Elem == outType.Elem { + sw.Do("val\n", nil) + } else { + sw.Do("$.|raw$(val)\n", outType.Elem) + } + } else { + sw.Do("newVal := new($.|raw$)\n", outType.Elem) + if function, ok := g.preexists(inType.Elem, outType.Elem); ok { + sw.Do("if err := $.|raw$(&val, newVal, s); err != nil {\n", function) + } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { + sw.Do("if err := "+nameTmpl+"(&val, newVal, s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&val, newVal, 0); err != nil {\n", nil) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + if inType.Key == outType.Key { + sw.Do("(*out)[key] = *newVal\n", nil) + } else { + sw.Do("(*out)[$.|raw$(key)] = *newVal\n", outType.Key) + } + } + } else { + // TODO: Implement it when necessary. + sw.Do("for range *in {\n", nil) + sw.Do("// FIXME: Converting unassignable keys unsupported $.|raw$\n", inType.Key) + } + sw.Do("}\n", nil) +} + +func (g *genConversion) doSlice(inType, outType *types.Type, sw *generator.SnippetWriter) { + sw.Do("*out = make($.|raw$, len(*in))\n", outType) + if inType.Elem == outType.Elem && inType.Elem.Kind == types.Builtin { + sw.Do("copy(*out, *in)\n", nil) + } else { + sw.Do("for i := range *in {\n", nil) + if isDirectlyAssignable(inType.Elem, outType.Elem) { + if inType.Elem == outType.Elem { + sw.Do("(*out)[i] = (*in)[i]\n", nil) + } else { + sw.Do("(*out)[i] = $.|raw$((*in)[i])\n", outType.Elem) + } + } else { + if function, ok := g.preexists(inType.Elem, outType.Elem); ok { + sw.Do("if err := $.|raw$(&(*in)[i], &(*out)[i], s); err != nil {\n", function) + } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { + sw.Do("if err := "+nameTmpl+"(&(*in)[i], &(*out)[i], s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) + } else { + // TODO: This triggers on metav1.ObjectMeta <-> metav1.ObjectMeta and + // similar because neither package is the target package, and + // we really don't know which package will have the conversion + // function defined. This fires on basically every object + // conversion outside of pkg/api/v1. 
+ sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {\n", nil) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + } + sw.Do("}\n", nil) + } +} + +func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.SnippetWriter) { + for _, inMember := range inType.Members { + if tagvals := extractTag(inMember.CommentLines); tagvals != nil && tagvals[0] == "false" { + // This field is excluded from conversion. + sw.Do("// INFO: in."+inMember.Name+" opted out of conversion generation\n", nil) + continue + } + outMember, found := findMember(outType, inMember.Name) + if !found { + // This field doesn't exist in the peer. + sw.Do("// WARNING: in."+inMember.Name+" requires manual conversion: does not exist in peer-type\n", nil) + g.skippedFields[inType] = append(g.skippedFields[inType], inMember.Name) + continue + } + + inMemberType, outMemberType := inMember.Type, outMember.Type + // create a copy of both underlying types but give them the top level alias name (since aliases + // are assignable) + if underlying := unwrapAlias(inMemberType); underlying != inMemberType { + copied := *underlying + copied.Name = inMemberType.Name + inMemberType = &copied + } + if underlying := unwrapAlias(outMemberType); underlying != outMemberType { + copied := *underlying + copied.Name = outMemberType.Name + outMemberType = &copied + } + + args := argsFromType(inMemberType, outMemberType).With("name", inMember.Name) + + // try a direct memory copy for any type that has exactly equivalent values + if g.useUnsafe.Equal(inMemberType, outMemberType) { + args = args. + With("Pointer", types.Ref("unsafe", "Pointer")). + With("SliceHeader", types.Ref("reflect", "SliceHeader")) + switch inMemberType.Kind { + case types.Pointer: + sw.Do("out.$.name$ = ($.outType|raw$)($.Pointer|raw$(in.$.name$))\n", args) + continue + case types.Map: + sw.Do("out.$.name$ = *(*$.outType|raw$)($.Pointer|raw$(&in.$.name$))\n", args) + continue + case types.Slice: + sw.Do("out.$.name$ = *(*$.outType|raw$)($.Pointer|raw$(&in.$.name$))\n", args) + continue + } + } + + // check based on the top level name, not the underlying names + if function, ok := g.preexists(inMember.Type, outMember.Type); ok { + if isDrop(function.CommentLines) { + continue + } + // copy-only functions that are directly assignable can be inlined instead of invoked. + // As an example, conversion functions exist that allow types with private fields to be + // correctly copied between types. These functions are equivalent to a memory assignment, + // and are necessary for the reflection path, but should not block memory conversion. + // Convert_unversioned_Time_to_unversioned_Time is an example of this logic. + if !isCopyOnly(function.CommentLines) || !g.isFastConversion(inMemberType, outMemberType) { + args["function"] = function + sw.Do("if err := $.function|raw$(&in.$.name$, &out.$.name$, s); err != nil {\n", args) + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + continue + } + klog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) + } + + // If we can't auto-convert, punt before we emit any code. 
+ if inMemberType.Kind != outMemberType.Kind { + sw.Do("// WARNING: in."+inMember.Name+" requires manual conversion: inconvertible types ("+ + inMemberType.String()+" vs "+outMemberType.String()+")\n", nil) + g.skippedFields[inType] = append(g.skippedFields[inType], inMember.Name) + continue + } + + switch inMemberType.Kind { + case types.Builtin: + if inMemberType == outMemberType { + sw.Do("out.$.name$ = in.$.name$\n", args) + } else { + sw.Do("out.$.name$ = $.outType|raw$(in.$.name$)\n", args) + } + case types.Map, types.Slice, types.Pointer: + if g.isDirectlyAssignable(inMemberType, outMemberType) { + sw.Do("out.$.name$ = in.$.name$\n", args) + continue + } + + sw.Do("if in.$.name$ != nil {\n", args) + sw.Do("in, out := &in.$.name$, &out.$.name$\n", args) + g.generateFor(inMemberType, outMemberType, sw) + sw.Do("} else {\n", nil) + sw.Do("out.$.name$ = nil\n", args) + sw.Do("}\n", nil) + case types.Struct: + if g.isDirectlyAssignable(inMemberType, outMemberType) { + sw.Do("out.$.name$ = in.$.name$\n", args) + continue + } + if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { + sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&in.$.name$, &out.$.name$, 0); err != nil {\n", args) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + case types.Alias: + if isDirectlyAssignable(inMemberType, outMemberType) { + if inMemberType == outMemberType { + sw.Do("out.$.name$ = in.$.name$\n", args) + } else { + sw.Do("out.$.name$ = $.outType|raw$(in.$.name$)\n", args) + } + } else { + if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { + sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&in.$.name$, &out.$.name$, 0); err != nil {\n", args) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + } + default: + if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { + sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&in.$.name$, &out.$.name$, 0); err != nil {\n", args) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + } + } +} + +func (g *genConversion) isFastConversion(inType, outType *types.Type) bool { + switch inType.Kind { + case types.Builtin: + return true + case types.Map, types.Slice, types.Pointer, types.Struct, types.Alias: + return g.isDirectlyAssignable(inType, outType) + default: + return false + } +} + +func (g *genConversion) isDirectlyAssignable(inType, outType *types.Type) bool { + return unwrapAlias(inType) == unwrapAlias(outType) +} + +func (g *genConversion) doPointer(inType, outType *types.Type, sw *generator.SnippetWriter) { + sw.Do("*out = new($.Elem|raw$)\n", outType) + if isDirectlyAssignable(inType.Elem, outType.Elem) { + if inType.Elem == outType.Elem { + sw.Do("**out = **in\n", nil) + } else { + sw.Do("**out = $.|raw$(**in)\n", outType.Elem) + } + } else { + if function, ok := g.preexists(inType.Elem, outType.Elem); ok { + sw.Do("if err := $.|raw$(*in, *out, s); err != nil {\n", function) + } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { + sw.Do("if err := "+nameTmpl+"(*in, *out, s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) + } else { + sw.Do("// TODO: Inefficient 
conversion - can we improve it?\n", nil)
+ sw.Do("if err := s.Convert(*in, *out, 0); err != nil {\n", nil)
+ }
+ sw.Do("return err\n", nil)
+ sw.Do("}\n", nil)
+ }
+}
+
+func (g *genConversion) doAlias(inType, outType *types.Type, sw *generator.SnippetWriter) {
+ // TODO: Add support for aliases.
+ g.doUnknown(inType, outType, sw)
+}
+
+func (g *genConversion) doUnknown(inType, outType *types.Type, sw *generator.SnippetWriter) {
+ sw.Do("// FIXME: Type $.|raw$ is unsupported.\n", inType)
+}
+
+func isDirectlyAssignable(inType, outType *types.Type) bool {
+ // TODO: This should maybe check for actual assignability between the two
+ // types, rather than superficial traits that happen to indicate it is
+ // assignable in the ways we currently use this code.
+ return inType.IsAssignable() && (inType.IsPrimitive() || isSamePackage(inType, outType))
+}
+
+func isSamePackage(inType, outType *types.Type) bool {
+ return inType.Name.Package == outType.Name.Package
+}
diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go
new file mode 100644
index 00000000..215b17bd
--- /dev/null
+++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// conversion-gen is a tool for auto-generating functions that convert
+// between internal and external types. A general conversion code
+// generation task involves three sets of packages: (1) a set of
+// packages containing internal types, (2) a single package containing
+// the external types, and (3) a single destination package (i.e.,
+// where the generated conversion functions go, and where the
+// developer-authored conversion functions are). The packages
+// containing the internal types play the role known as "peer
+// packages" in the general code-generation framework of Kubernetes.
+//
+// For each conversion task, `conversion-gen` will generate functions
+// that efficiently convert between same-name types in the two
+// (internal, external) packages. The generated functions include
+// ones named
+// autoConvert_<pkg1>_<type>_To_<pkg2>_<type>
+// for each such pair of types --- both with (pkg1,pkg2) =
+// (internal,external) and (pkg1,pkg2) = (external,internal).
+// Additionally: if the destination package does not contain one in a
+// non-generated file then a function named
+// Convert_<pkg1>_<type>_To_<pkg2>_<type>
+// is also generated and it simply calls the `autoConvert...`
+// function. The generated conversion functions use standard value
+// assignment wherever possible. For compound types, the generated
+// conversion functions call the `Convert...` functions for the
+// subsidiary types. Thus developers can override the behavior for
+// selected types.
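+//
+// For illustration (hypothetical packages and types), such an override is simply
+// a hand-written function in the destination package:
+//
+//   func Convert_v1_Widget_To_core_Widget(in *v1.Widget, out *core.Widget, s conversion.Scope) error {
+//       if err := autoConvert_v1_Widget_To_core_Widget(in, out, s); err != nil {
+//           return err
+//       }
+//       // hand-written fix-ups go here
+//       return nil
+//   }
+//
+// When such a function is present, the generator emits only the autoConvert
+// variant for that pair and leaves the public name to the developer.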
+// For a top-level object type (i.e., the type of an
+// object that will be input to an apiserver), for such an override to
+// be used by the apiserver the developer-maintained conversion
+// functions must also be registered by invoking the
+// `AddConversionFuncs` method of the relevant `Scheme` object from
+// k8s.io/apimachinery/pkg/runtime.
+//
+// `conversion-gen` will scan its `--input-dirs`, looking at the
+// package defined in each of those directories for comment tags that
+// define a conversion code generation task. A package requests
+// conversion code generation by including one or more comments in the
+// package's `doc.go` file (currently anywhere in that file is
+// acceptable, but the recommended location is above the `package`
+// statement), of the form:
+// // +k8s:conversion-gen=<import-path-of-internal-package>
+// This introduces a conversion task, for which the destination
+// package is the one containing the file with the tag and the tag
+// identifies a package containing internal types. If there is also a
+// tag of the form
+// // +k8s:conversion-gen-external-types=<import-path-of-external-types>
+// then it identifies the package containing the external types;
+// otherwise they are in the destination package.
+//
+// For each conversion code generation task, the full set of internal
+// packages (AKA peer packages) consists of the ones specified in the
+// `k8s:conversion-gen` tags PLUS any specified in the
+// `--base-peer-dirs` and `--extra-peer-dirs` flags on the command
+// line.
+//
+// When generating for a package, individual types or fields of structs may opt
+// out of Conversion generation by specifying a comment of the form:
+// // +k8s:conversion-gen=false
+package main
+
+import (
+ "flag"
+ "path/filepath"
+
+ "github.com/spf13/pflag"
+ "k8s.io/gengo/args"
+ "k8s.io/klog"
+
+ generatorargs "k8s.io/code-generator/cmd/conversion-gen/args"
+ "k8s.io/code-generator/cmd/conversion-gen/generators"
+ "k8s.io/code-generator/pkg/util"
+)
+
+func main() {
+ klog.InitFlags(nil)
+ genericArgs, customArgs := generatorargs.NewDefaults()
+
+ // Override defaults.
+ // TODO: move this out of conversion-gen
+ genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
+
+ genericArgs.AddFlags(pflag.CommandLine)
+ customArgs.AddFlags(pflag.CommandLine)
+ flag.Set("logtostderr", "true")
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ pflag.Parse()
+
+ if err := generatorargs.Validate(genericArgs); err != nil {
+ klog.Fatalf("Error: %v", err)
+ }
+
+ // Run it.
+ if err := genericArgs.Execute(
+ generators.NameSystems(),
+ generators.DefaultNameSystem(),
+ generators.Packages,
+ ); err != nil {
+ klog.Fatalf("Error: %v", err)
+ }
+ klog.V(2).Info("Completed successfully.")
+}
diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go
new file mode 100644
index 00000000..78971301
--- /dev/null
+++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/deepcopy-gen/generators" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs generators.CustomArgs + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there + genericArgs.OutputFileBaseName = "deepcopy_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.BoundingDirs, "bounding-dirs", ca.BoundingDirs, + "Comma-separated list of import paths which bound the types for which deep-copies will be generated.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*generators.CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go new file mode 100644 index 00000000..96fb2987 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -0,0 +1,85 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// deepcopy-gen is a tool for auto-generating DeepCopy functions. +// +// Given a list of input directories, it will generate functions that +// efficiently perform a full deep-copy of each type. For any type that +// offers a `.DeepCopy()` method, it will simply call that. Otherwise it will +// use standard value assignment whenever possible. If that is not possible it +// will try to call its own generated copy function for the type, if the type is +// within the allowed root packages. Failing that, it will fall back on +// `conversion.Cloner.DeepCopy(val)` to make the copy. The resulting file will +// be stored in the same directory as the processed source package. +// +// Generation is governed by comment tags in the source. 
Any package may +// request DeepCopy generation by including a comment in the file-comments of +// one file, of the form: +// // +k8s:deepcopy-gen=package +// +// DeepCopy functions can be generated for individual types, rather than the +// entire package by specifying a comment on the type definion of the form: +// // +k8s:deepcopy-gen=true +// +// When generating for a whole package, individual types may opt out of +// DeepCopy generation by specifying a comment on the of the form: +// // +k8s:deepcopy-gen=false +// +// Note that registration is a whole-package option, and is not available for +// individual types. +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/deepcopy-gen/generators" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" + "k8s.io/code-generator/pkg/util" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move this out of deepcopy-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go new file mode 100644 index 00000000..3c5a042c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/defaulter-gen/generators" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs generators.CustomArgs + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there + genericArgs.OutputFileBaseName = "zz_generated.defaults" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. 
+func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {
+ pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs,
+ "Comma-separated list of import paths which are considered, after tag-specified peers, for conversions.")
+}
+
+// Validate checks the given arguments.
+func Validate(genericArgs *args.GeneratorArgs) error {
+ _ = genericArgs.CustomArgs.(*generators.CustomArgs)
+
+ if len(genericArgs.OutputFileBaseName) == 0 {
+ return fmt.Errorf("output file base name cannot be empty")
+ }
+
+ return nil
+}
diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go
new file mode 100644
index 00000000..40bb875e
--- /dev/null
+++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// defaulter-gen is a tool for auto-generating Defaulter functions.
+//
+// Given a list of input directories, it will scan for top level types
+// and generate efficient defaulters for an entire object from the sum
+// of the SetDefault_* methods contained in the object tree.
+//
+// Generation is governed by comment tags in the source. Any package may
+// request defaulter generation by including one or more comment tags at
+// the package comment level:
+//
+// // +k8s:defaulter-gen=<field-name-to-flag>
+//
+// which will create defaulters for any type that contains the provided
+// field name (if the type has defaulters). Any type may request explicit
+// defaulting by providing the comment tag:
+//
+// // +k8s:defaulter-gen=true|false
+//
+// An existing defaulter method (`SetDefaults_TYPE`) can provide the
+// comment tag:
+//
+// // +k8s:defaulter-gen=covers
+//
+// to indicate that the defaulter does not or should not call any nested
+// defaulters.
+package main
+
+import (
+ "flag"
+ "path/filepath"
+
+ "github.com/spf13/pflag"
+ "k8s.io/gengo/args"
+ "k8s.io/gengo/examples/defaulter-gen/generators"
+ "k8s.io/klog"
+
+ generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args"
+ "k8s.io/code-generator/pkg/util"
+)
+
+func main() {
+ klog.InitFlags(nil)
+ genericArgs, customArgs := generatorargs.NewDefaults()
+
+ // Override defaults.
+ // TODO: move this out of defaulter-gen
+ genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
+
+ genericArgs.AddFlags(pflag.CommandLine)
+ customArgs.AddFlags(pflag.CommandLine)
+ flag.Set("logtostderr", "true")
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ pflag.Parse()
+
+ if err := generatorargs.Validate(genericArgs); err != nil {
+ klog.Fatalf("Error: %v", err)
+ }
+
+ // Run it.
+ if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore new file mode 100644 index 00000000..0e9aa466 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore @@ -0,0 +1 @@ +go-to-protobuf diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS new file mode 100644 index 00000000..61365916 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- smarterclayton +reviewers: +- smarterclayton diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go new file mode 100644 index 00000000..847a6a5a --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// go-to-protobuf generates a Protobuf IDL from a Go struct, respecting any +// existing IDL tags on the Go struct. +package main + +import ( + goflag "flag" + + flag "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/go-to-protobuf/protobuf" +) + +var g = protobuf.New() + +func init() { + g.BindFlags(flag.CommandLine) + goflag.Set("logtostderr", "true") + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) +} + +func main() { + flag.Parse() + protobuf.Run(g) +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go new file mode 100644 index 00000000..e85ceb8d --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -0,0 +1,428 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// go-to-protobuf generates a Protobuf IDL from a Go struct, respecting any +// existing IDL tags on the Go struct. 
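+//
+// For illustration only (hypothetical type), a Go struct carrying such IDL tags
+// looks like
+//
+//   type Widget struct {
+//       Name     string `json:"name" protobuf:"bytes,1,opt,name=name"`
+//       Replicas int32  `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
+//   }
+//
+// and the generator emits a matching message into the package's generated.proto.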
+package protobuf + +import ( + "bytes" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + flag "github.com/spf13/pflag" + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/simple" + "gonum.org/v1/gonum/graph/topo" + + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" + "k8s.io/gengo/types" +) + +type Generator struct { + Common args.GeneratorArgs + APIMachineryPackages string + Packages string + OutputBase string + VendorOutputBase string + ProtoImport []string + Conditional string + Clean bool + OnlyIDL bool + KeepGogoproto bool + SkipGeneratedRewrite bool + DropEmbeddedFields string +} + +func New() *Generator { + sourceTree := args.DefaultSourceTree() + common := args.GeneratorArgs{ + OutputBase: sourceTree, + GoHeaderFilePath: filepath.Join(sourceTree, util.BoilerplatePath()), + } + defaultProtoImport := filepath.Join(sourceTree, "k8s.io", "kubernetes", "vendor", "github.com", "gogo", "protobuf", "protobuf") + cwd, err := os.Getwd() + if err != nil { + log.Fatalf("Cannot get current directory.") + } + return &Generator{ + Common: common, + OutputBase: sourceTree, + VendorOutputBase: filepath.Join(cwd, "vendor"), + ProtoImport: []string{defaultProtoImport}, + APIMachineryPackages: strings.Join([]string{ + `+k8s.io/apimachinery/pkg/util/intstr`, + `+k8s.io/apimachinery/pkg/api/resource`, + `+k8s.io/apimachinery/pkg/runtime/schema`, + `+k8s.io/apimachinery/pkg/runtime`, + `k8s.io/apimachinery/pkg/apis/meta/v1`, + `k8s.io/apimachinery/pkg/apis/meta/v1beta1`, + `k8s.io/apimachinery/pkg/apis/testapigroup/v1`, + }, ","), + Packages: "", + DropEmbeddedFields: "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta", + } +} + +func (g *Generator) BindFlags(flag *flag.FlagSet) { + flag.StringVarP(&g.Common.GoHeaderFilePath, "go-header-file", "h", g.Common.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.") + flag.BoolVar(&g.Common.VerifyOnly, "verify-only", g.Common.VerifyOnly, "If true, only verify existing output, do not write anything.") + flag.StringVarP(&g.Packages, "packages", "p", g.Packages, "comma-separated list of directories to get input types from. Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.") + flag.StringVar(&g.APIMachineryPackages, "apimachinery-packages", g.APIMachineryPackages, "comma-separated list of directories to get apimachinery input types from which are needed by any API. 
Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.") + flag.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/") + flag.StringVar(&g.VendorOutputBase, "vendor-output-base", g.VendorOutputBase, "The vendor/ directory to look for packages in; defaults to $PWD/vendor/.") + flag.StringSliceVar(&g.ProtoImport, "proto-import", g.ProtoImport, "The search path for the core protobuf .protos, required; defaults $GOPATH/src/k8s.io/kubernetes/vendor/github.com/gogo/protobuf/protobuf.") + flag.StringVar(&g.Conditional, "conditional", g.Conditional, "An optional Golang build tag condition to add to the generated Go code") + flag.BoolVar(&g.Clean, "clean", g.Clean, "If true, remove all generated files for the specified Packages.") + flag.BoolVar(&g.OnlyIDL, "only-idl", g.OnlyIDL, "If true, only generate the IDL for each package.") + flag.BoolVar(&g.KeepGogoproto, "keep-gogoproto", g.KeepGogoproto, "If true, the generated IDL will contain gogoprotobuf extensions which are normally removed") + flag.BoolVar(&g.SkipGeneratedRewrite, "skip-generated-rewrite", g.SkipGeneratedRewrite, "If true, skip fixing up the generated.pb.go file (debugging only).") + flag.StringVar(&g.DropEmbeddedFields, "drop-embedded-fields", g.DropEmbeddedFields, "Comma-delimited list of embedded Go types to omit from generated protobufs") +} + +func Run(g *Generator) { + if g.Common.VerifyOnly { + g.OnlyIDL = true + g.Clean = false + } + + b := parser.New() + b.AddBuildTags("proto") + + omitTypes := map[types.Name]struct{}{} + for _, t := range strings.Split(g.DropEmbeddedFields, ",") { + name := types.Name{} + if i := strings.LastIndex(t, "."); i != -1 { + name.Package, name.Name = t[:i], t[i+1:] + } else { + name.Name = t + } + if len(name.Name) == 0 { + log.Fatalf("--drop-embedded-types requires names in the form of [GOPACKAGE.]TYPENAME: %v", t) + } + omitTypes[name] = struct{}{} + } + + boilerplate, err := g.Common.LoadGoBoilerplate() + if err != nil { + log.Fatalf("Failed loading boilerplate (consider using the go-header-file flag): %v", err) + } + + protobufNames := NewProtobufNamer() + outputPackages := generator.Packages{} + nonOutputPackages := map[string]struct{}{} + + var packages []string + if len(g.APIMachineryPackages) != 0 { + packages = append(packages, strings.Split(g.APIMachineryPackages, ",")...) + } + if len(g.Packages) != 0 { + packages = append(packages, strings.Split(g.Packages, ",")...) + } + if len(packages) == 0 { + log.Fatalf("Both apimachinery-packages and packages are empty. At least one package must be specified.") + } + + for _, d := range packages { + generateAllTypes, outputPackage := true, true + switch { + case strings.HasPrefix(d, "+"): + d = d[1:] + generateAllTypes = false + case strings.HasPrefix(d, "-"): + d = d[1:] + outputPackage = false + } + name := protoSafePackage(d) + parts := strings.SplitN(d, "=", 2) + if len(parts) > 1 { + d = parts[0] + name = parts[1] + } + p := newProtobufPackage(d, name, generateAllTypes, omitTypes) + header := append([]byte{}, boilerplate...) + header = append(header, p.HeaderText...) 
+ p.HeaderText = header + protobufNames.Add(p) + if outputPackage { + outputPackages = append(outputPackages, p) + } else { + nonOutputPackages[name] = struct{}{} + } + } + + if !g.Common.VerifyOnly { + for _, p := range outputPackages { + if err := p.(*protobufPackage).Clean(g.OutputBase); err != nil { + log.Fatalf("Unable to clean package %s: %v", p.Name(), err) + } + } + } + + if g.Clean { + return + } + + for _, p := range protobufNames.List() { + if err := b.AddDir(p.Path()); err != nil { + log.Fatalf("Unable to add directory %q: %v", p.Path(), err) + } + } + + c, err := generator.NewContext( + b, + namer.NameSystems{ + "public": namer.NewPublicNamer(3), + "proto": protobufNames, + }, + "public", + ) + if err != nil { + log.Fatalf("Failed making a context: %v", err) + } + + c.Verify = g.Common.VerifyOnly + c.FileTypes["protoidl"] = NewProtoFile() + + // order package by imports, importees first + deps := deps(c, protobufNames.packages) + order, err := importOrder(deps) + if err != nil { + log.Fatalf("Failed to order packages by imports: %v", err) + } + topologicalPos := map[string]int{} + for i, p := range order { + topologicalPos[p] = i + } + sort.Sort(positionOrder{topologicalPos, protobufNames.packages}) + + var vendoredOutputPackages, localOutputPackages generator.Packages + for _, p := range protobufNames.packages { + if _, ok := nonOutputPackages[p.Name()]; ok { + // if we're not outputting the package, don't include it in either package list + continue + } + p.Vendored = strings.Contains(c.Universe[p.PackagePath].SourcePath, "/vendor/") + if p.Vendored { + vendoredOutputPackages = append(vendoredOutputPackages, p) + } else { + localOutputPackages = append(localOutputPackages, p) + } + } + + if err := protobufNames.AssignTypesToPackages(c); err != nil { + log.Fatalf("Failed to identify Common types: %v", err) + } + + if err := c.ExecutePackages(g.VendorOutputBase, vendoredOutputPackages); err != nil { + log.Fatalf("Failed executing vendor generator: %v", err) + } + if err := c.ExecutePackages(g.OutputBase, localOutputPackages); err != nil { + log.Fatalf("Failed executing local generator: %v", err) + } + + if g.OnlyIDL { + return + } + + if _, err := exec.LookPath("protoc"); err != nil { + log.Fatalf("Unable to find 'protoc': %v", err) + } + + searchArgs := []string{"-I", ".", "-I", g.OutputBase} + if len(g.ProtoImport) != 0 { + for _, s := range g.ProtoImport { + searchArgs = append(searchArgs, "-I", s) + } + } + args := append(searchArgs, fmt.Sprintf("--gogo_out=%s", g.OutputBase)) + + buf := &bytes.Buffer{} + if len(g.Conditional) > 0 { + fmt.Fprintf(buf, "// +build %s\n\n", g.Conditional) + } + buf.Write(boilerplate) + + for _, outputPackage := range outputPackages { + p := outputPackage.(*protobufPackage) + + path := filepath.Join(g.OutputBase, p.ImportPath()) + outputPath := filepath.Join(g.OutputBase, p.OutputPath()) + if p.Vendored { + path = filepath.Join(g.VendorOutputBase, p.ImportPath()) + outputPath = filepath.Join(g.VendorOutputBase, p.OutputPath()) + } + + // generate the gogoprotobuf protoc + cmd := exec.Command("protoc", append(args, path)...) 
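+ // The resulting invocation is roughly (paths illustrative):
+ //   protoc -I . -I <output-base> -I <proto-import> --gogo_out=<output-base> <package-path>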
+ out, err := cmd.CombinedOutput() + if len(out) > 0 { + log.Printf(string(out)) + } + if err != nil { + log.Println(strings.Join(cmd.Args, " ")) + log.Fatalf("Unable to generate protoc on %s: %v", p.PackageName, err) + } + + if g.SkipGeneratedRewrite { + continue + } + + // alter the generated protobuf file to remove the generated types (but leave the serializers) and rewrite the + // package statement to match the desired package name + if err := RewriteGeneratedGogoProtobufFile(outputPath, p.ExtractGeneratedType, p.OptionalTypeName, buf.Bytes()); err != nil { + log.Fatalf("Unable to rewrite generated %s: %v", outputPath, err) + } + + // sort imports + cmd = exec.Command("goimports", "-w", outputPath) + out, err = cmd.CombinedOutput() + if len(out) > 0 { + log.Printf(string(out)) + } + if err != nil { + log.Println(strings.Join(cmd.Args, " ")) + log.Fatalf("Unable to rewrite imports for %s: %v", p.PackageName, err) + } + + // format and simplify the generated file + cmd = exec.Command("gofmt", "-s", "-w", outputPath) + out, err = cmd.CombinedOutput() + if len(out) > 0 { + log.Printf(string(out)) + } + if err != nil { + log.Println(strings.Join(cmd.Args, " ")) + log.Fatalf("Unable to apply gofmt for %s: %v", p.PackageName, err) + } + } + + if g.SkipGeneratedRewrite { + return + } + + if !g.KeepGogoproto { + // generate, but do so without gogoprotobuf extensions + for _, outputPackage := range outputPackages { + p := outputPackage.(*protobufPackage) + p.OmitGogo = true + } + if err := c.ExecutePackages(g.VendorOutputBase, vendoredOutputPackages); err != nil { + log.Fatalf("Failed executing vendor generator: %v", err) + } + if err := c.ExecutePackages(g.OutputBase, localOutputPackages); err != nil { + log.Fatalf("Failed executing local generator: %v", err) + } + } + + for _, outputPackage := range outputPackages { + p := outputPackage.(*protobufPackage) + + if len(p.StructTags) == 0 { + continue + } + + pattern := filepath.Join(g.OutputBase, p.PackagePath, "*.go") + if p.Vendored { + pattern = filepath.Join(g.VendorOutputBase, p.PackagePath, "*.go") + } + files, err := filepath.Glob(pattern) + if err != nil { + log.Fatalf("Can't glob pattern %q: %v", pattern, err) + } + + for _, s := range files { + if strings.HasSuffix(s, "_test.go") { + continue + } + if err := RewriteTypesWithProtobufStructTags(s, p.StructTags); err != nil { + log.Fatalf("Unable to rewrite with struct tags %s: %v", s, err) + } + } + } +} + +func deps(c *generator.Context, pkgs []*protobufPackage) map[string][]string { + ret := map[string][]string{} + for _, p := range pkgs { + for _, d := range c.Universe[p.PackagePath].Imports { + ret[p.PackagePath] = append(ret[p.PackagePath], d.Path) + } + } + return ret +} + +func importOrder(deps map[string][]string) ([]string, error) { + nodes := map[string]graph.Node{} + names := map[int64]string{} + g := simple.NewDirectedGraph() + for pkg, imports := range deps { + for _, imp := range imports { + if _, found := nodes[pkg]; !found { + n := g.NewNode() + g.AddNode(n) + nodes[pkg] = n + names[n.ID()] = pkg + } + if _, found := nodes[imp]; !found { + n := g.NewNode() + g.AddNode(n) + nodes[imp] = n + names[n.ID()] = imp + } + g.SetEdge(g.NewEdge(nodes[imp], nodes[pkg])) + } + } + + ret := []string{} + sorted, err := topo.Sort(g) + if err != nil { + return nil, err + } + for _, n := range sorted { + ret = append(ret, names[n.ID()]) + fmt.Println("topological order", names[n.ID()]) + } + return ret, nil +} + +type positionOrder struct { + pos map[string]int + elements 
[]*protobufPackage +} + +func (o positionOrder) Len() int { + return len(o.elements) +} + +func (o positionOrder) Less(i, j int) bool { + return o.pos[o.elements[i].PackagePath] < o.pos[o.elements[j].PackagePath] +} + +func (o positionOrder) Swap(i, j int) { + x := o.elements[i] + o.elements[i] = o.elements[j] + o.elements[j] = x +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go new file mode 100644 index 00000000..1a9803dc --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -0,0 +1,773 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "fmt" + "io" + "log" + "reflect" + "sort" + "strconv" + "strings" + + "k8s.io/klog" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genProtoIDL produces a .proto IDL. +type genProtoIDL struct { + generator.DefaultGen + localPackage types.Name + localGoPackage types.Name + imports namer.ImportTracker + + generateAll bool + omitGogo bool + omitFieldTypes map[types.Name]struct{} +} + +func (g *genProtoIDL) PackageVars(c *generator.Context) []string { + if g.omitGogo { + return []string{ + fmt.Sprintf("option go_package = %q;", g.localGoPackage.Name), + } + } + return []string{ + "option (gogoproto.marshaler_all) = true;", + "option (gogoproto.stable_marshaler_all) = true;", + "option (gogoproto.sizer_all) = true;", + "option (gogoproto.goproto_stringer_all) = false;", + "option (gogoproto.stringer_all) = true;", + "option (gogoproto.unmarshaler_all) = true;", + "option (gogoproto.goproto_unrecognized_all) = false;", + "option (gogoproto.goproto_enum_prefix_all) = false;", + "option (gogoproto.goproto_getters_all) = false;", + fmt.Sprintf("option go_package = %q;", g.localGoPackage.Name), + } +} +func (g *genProtoIDL) Filename() string { return g.OptionalName + ".proto" } +func (g *genProtoIDL) FileType() string { return "protoidl" } +func (g *genProtoIDL) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + // The local namer returns the correct protobuf name for a proto type + // in the context of a package + "local": localNamer{g.localPackage}, + } +} + +// Filter ignores types that are identified as not exportable. +func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool { + tagVals := types.ExtractCommentTags("+", t.CommentLines)["protobuf"] + if tagVals != nil { + if tagVals[0] == "false" { + // Type specified "false". + return false + } + if tagVals[0] == "true" { + // Type specified "true". + return true + } + klog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0]) + } + if !g.generateAll { + // We're not generating everything. 
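+ // A type in such a package can still opt in explicitly, e.g. (illustrative):
+ //
+ //   // +protobuf=true
+ //   type Widget struct { ... }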
+ return false + } + seen := map[*types.Type]bool{} + ok := isProtoable(seen, t) + return ok +} + +func isProtoable(seen map[*types.Type]bool, t *types.Type) bool { + if seen[t] { + // be optimistic in the case of type cycles. + return true + } + seen[t] = true + switch t.Kind { + case types.Builtin: + return true + case types.Alias: + return isProtoable(seen, t.Underlying) + case types.Slice, types.Pointer: + return isProtoable(seen, t.Elem) + case types.Map: + return isProtoable(seen, t.Key) && isProtoable(seen, t.Elem) + case types.Struct: + if len(t.Members) == 0 { + return true + } + for _, m := range t.Members { + if isProtoable(seen, m.Type) { + return true + } + } + return false + case types.Func, types.Chan: + return false + case types.DeclarationOf, types.Unknown, types.Unsupported: + return false + case types.Interface: + return false + default: + log.Printf("WARNING: type %q is not portable: %s", t.Kind, t.Name) + return false + } +} + +// isOptionalAlias should return true if the specified type has an underlying type +// (is an alias) of a map or slice and has the comment tag protobuf.nullable=true, +// indicating that the type should be nullable in protobuf. +func isOptionalAlias(t *types.Type) bool { + if t.Underlying == nil || (t.Underlying.Kind != types.Map && t.Underlying.Kind != types.Slice) { + return false + } + if extractBoolTagOrDie("protobuf.nullable", t.CommentLines) == false { + return false + } + return true +} + +func (g *genProtoIDL) Imports(c *generator.Context) (imports []string) { + lines := []string{} + // TODO: this could be expressed more cleanly + for _, line := range g.imports.ImportLines() { + if g.omitGogo && line == "github.com/gogo/protobuf/gogoproto/gogo.proto" { + continue + } + lines = append(lines, line) + } + return lines +} + +// GenerateType makes the body of a file implementing a set for type t. +func (g *genProtoIDL) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + b := bodyGen{ + locator: &protobufLocator{ + namer: c.Namers["proto"].(ProtobufFromGoNamer), + tracker: g.imports, + universe: c.Universe, + + localGoPackage: g.localGoPackage.Package, + }, + localPackage: g.localPackage, + + omitGogo: g.omitGogo, + omitFieldTypes: g.omitFieldTypes, + + t: t, + } + switch t.Kind { + case types.Alias: + return b.doAlias(sw) + case types.Struct: + return b.doStruct(sw) + default: + return b.unknown(sw) + } +} + +// ProtobufFromGoNamer finds the protobuf name of a type (and its package, and +// the package path) from its Go name. +type ProtobufFromGoNamer interface { + GoNameToProtoName(name types.Name) types.Name +} + +type ProtobufLocator interface { + ProtoTypeFor(t *types.Type) (*types.Type, error) + GoTypeForName(name types.Name) *types.Type + CastTypeName(name types.Name) string +} + +type protobufLocator struct { + namer ProtobufFromGoNamer + tracker namer.ImportTracker + universe types.Universe + + localGoPackage string +} + +// CastTypeName returns the cast type name of a Go type +// TODO: delegate to a new localgo namer? +func (p protobufLocator) CastTypeName(name types.Name) string { + if name.Package == p.localGoPackage { + return name.Name + } + return name.String() +} + +func (p protobufLocator) GoTypeForName(name types.Name) *types.Type { + if len(name.Package) == 0 { + name.Package = p.localGoPackage + } + return p.universe.Type(name) +} + +// ProtoTypeFor locates a Protobuf type for the provided Go type (if possible). 
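+// Maps and types already converted to protobuf are returned unchanged, fundamental Go
+// types are mapped to proto scalars, and structs or optional aliases become message
+// types; anything else yields errUnrecognizedType.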
+func (p protobufLocator) ProtoTypeFor(t *types.Type) (*types.Type, error) { + switch { + // we've already converted the type, or it's a map + case t.Kind == types.Protobuf || t.Kind == types.Map: + p.tracker.AddType(t) + return t, nil + } + // it's a fundamental type + if t, ok := isFundamentalProtoType(t); ok { + p.tracker.AddType(t) + return t, nil + } + // it's a message + if t.Kind == types.Struct || isOptionalAlias(t) { + t := &types.Type{ + Name: p.namer.GoNameToProtoName(t.Name), + Kind: types.Protobuf, + + CommentLines: t.CommentLines, + } + p.tracker.AddType(t) + return t, nil + } + return nil, errUnrecognizedType +} + +type bodyGen struct { + locator ProtobufLocator + localPackage types.Name + omitGogo bool + omitFieldTypes map[types.Name]struct{} + + t *types.Type +} + +func (b bodyGen) unknown(sw *generator.SnippetWriter) error { + return fmt.Errorf("not sure how to generate: %#v", b.t) +} + +func (b bodyGen) doAlias(sw *generator.SnippetWriter) error { + if !isOptionalAlias(b.t) { + return nil + } + + var kind string + switch b.t.Underlying.Kind { + case types.Map: + kind = "map" + default: + kind = "slice" + } + optional := &types.Type{ + Name: b.t.Name, + Kind: types.Struct, + + CommentLines: b.t.CommentLines, + SecondClosestCommentLines: b.t.SecondClosestCommentLines, + Members: []types.Member{ + { + Name: "Items", + CommentLines: []string{fmt.Sprintf("items, if empty, will result in an empty %s\n", kind)}, + Type: b.t.Underlying, + }, + }, + } + nested := b + nested.t = optional + return nested.doStruct(sw) +} + +func (b bodyGen) doStruct(sw *generator.SnippetWriter) error { + if len(b.t.Name.Name) == 0 { + return nil + } + if namer.IsPrivateGoName(b.t.Name.Name) { + return nil + } + + var alias *types.Type + var fields []protoField + options := []string{} + allOptions := types.ExtractCommentTags("+", b.t.CommentLines) + for k, v := range allOptions { + switch { + case strings.HasPrefix(k, "protobuf.options."): + key := strings.TrimPrefix(k, "protobuf.options.") + switch key { + case "marshal": + if v[0] == "false" { + if !b.omitGogo { + options = append(options, + "(gogoproto.marshaler) = false", + "(gogoproto.unmarshaler) = false", + "(gogoproto.sizer) = false", + ) + } + } + default: + if !b.omitGogo || !strings.HasPrefix(key, "(gogoproto.") { + if key == "(gogoproto.goproto_stringer)" && v[0] == "false" { + options = append(options, "(gogoproto.stringer) = false") + } + options = append(options, fmt.Sprintf("%s = %s", key, v[0])) + } + } + // protobuf.as allows a type to have the same message contents as another Go type + case k == "protobuf.as": + fields = nil + if alias = b.locator.GoTypeForName(types.Name{Name: v[0]}); alias == nil { + return fmt.Errorf("type %v references alias %q which does not exist", b.t, v[0]) + } + // protobuf.embed instructs the generator to use the named type in this package + // as an embedded message. + case k == "protobuf.embed": + fields = []protoField{ + { + Tag: 1, + Name: v[0], + Type: &types.Type{ + Name: types.Name{ + Name: v[0], + Package: b.localPackage.Package, + Path: b.localPackage.Path, + }, + }, + }, + } + } + } + if alias == nil { + alias = b.t + } + + // If we don't explicitly embed anything, generate fields by traversing fields. 
+ if fields == nil { + memberFields, err := membersToFields(b.locator, alias, b.localPackage, b.omitFieldTypes) + if err != nil { + return fmt.Errorf("type %v cannot be converted to protobuf: %v", b.t, err) + } + fields = memberFields + } + + out := sw.Out() + genComment(out, b.t.CommentLines, "") + sw.Do(`message $.Name.Name$ { +`, b.t) + + if len(options) > 0 { + sort.Sort(sort.StringSlice(options)) + for _, s := range options { + fmt.Fprintf(out, " option %s;\n", s) + } + fmt.Fprintln(out) + } + + for i, field := range fields { + genComment(out, field.CommentLines, " ") + fmt.Fprintf(out, " ") + switch { + case field.Map: + case field.Repeated: + fmt.Fprintf(out, "repeated ") + case field.Required: + fmt.Fprintf(out, "required ") + default: + fmt.Fprintf(out, "optional ") + } + sw.Do(`$.Type|local$ $.Name$ = $.Tag$`, field) + if len(field.Extras) > 0 { + extras := []string{} + for k, v := range field.Extras { + if b.omitGogo && strings.HasPrefix(k, "(gogoproto.") { + continue + } + extras = append(extras, fmt.Sprintf("%s = %s", k, v)) + } + sort.Sort(sort.StringSlice(extras)) + if len(extras) > 0 { + fmt.Fprintf(out, " [") + fmt.Fprint(out, strings.Join(extras, ", ")) + fmt.Fprintf(out, "]") + } + } + fmt.Fprintf(out, ";\n") + if i != len(fields)-1 { + fmt.Fprintf(out, "\n") + } + } + fmt.Fprintf(out, "}\n\n") + return nil +} + +type protoField struct { + LocalPackage types.Name + + Tag int + Name string + Type *types.Type + Map bool + Repeated bool + Optional bool + Required bool + Nullable bool + Extras map[string]string + + CommentLines []string +} + +var ( + errUnrecognizedType = fmt.Errorf("did not recognize the provided type") +) + +func isFundamentalProtoType(t *types.Type) (*types.Type, bool) { + // TODO: when we enable proto3, also include other fundamental types in the google.protobuf package + // switch { + // case t.Kind == types.Struct && t.Name == types.Name{Package: "time", Name: "Time"}: + // return &types.Type{ + // Kind: types.Protobuf, + // Name: types.Name{Path: "google/protobuf/timestamp.proto", Package: "google.protobuf", Name: "Timestamp"}, + // }, true + // } + switch t.Kind { + case types.Slice: + if t.Elem.Name.Name == "byte" && len(t.Elem.Name.Package) == 0 { + return &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf}, true + } + case types.Builtin: + switch t.Name.Name { + case "string", "uint32", "int32", "uint64", "int64", "bool": + return &types.Type{Name: types.Name{Name: t.Name.Name}, Kind: types.Protobuf}, true + case "int": + return &types.Type{Name: types.Name{Name: "int64"}, Kind: types.Protobuf}, true + case "uint": + return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true + case "float64", "float": + return &types.Type{Name: types.Name{Name: "double"}, Kind: types.Protobuf}, true + case "float32": + return &types.Type{Name: types.Name{Name: "float"}, Kind: types.Protobuf}, true + case "uintptr": + return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true + } + // TODO: complex? 
+ } + return t, false +} + +func memberTypeToProtobufField(locator ProtobufLocator, field *protoField, t *types.Type) error { + var err error + switch t.Kind { + case types.Protobuf: + field.Type, err = locator.ProtoTypeFor(t) + case types.Builtin: + field.Type, err = locator.ProtoTypeFor(t) + case types.Map: + valueField := &protoField{} + if err := memberTypeToProtobufField(locator, valueField, t.Elem); err != nil { + return err + } + keyField := &protoField{} + if err := memberTypeToProtobufField(locator, keyField, t.Key); err != nil { + return err + } + // All other protobuf types have kind types.Protobuf, so setting types.Map + // here would be very misleading. + field.Type = &types.Type{ + Kind: types.Protobuf, + Key: keyField.Type, + Elem: valueField.Type, + } + if !strings.HasPrefix(t.Name.Name, "map[") { + field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) + } + if k, ok := keyField.Extras["(gogoproto.casttype)"]; ok { + field.Extras["(gogoproto.castkey)"] = k + } + if v, ok := valueField.Extras["(gogoproto.casttype)"]; ok { + field.Extras["(gogoproto.castvalue)"] = v + } + field.Map = true + case types.Pointer: + if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { + return err + } + field.Nullable = true + case types.Alias: + if isOptionalAlias(t) { + field.Type, err = locator.ProtoTypeFor(t) + field.Nullable = true + } else { + if err := memberTypeToProtobufField(locator, field, t.Underlying); err != nil { + log.Printf("failed to alias: %s %s: err %v", t.Name, t.Underlying.Name, err) + return err + } + // If this is not an alias to a slice, cast to the alias + if !field.Repeated { + if field.Extras == nil { + field.Extras = make(map[string]string) + } + field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) + } + } + case types.Slice: + if t.Elem.Name.Name == "byte" && len(t.Elem.Name.Package) == 0 { + field.Type = &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf} + return nil + } + if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { + return err + } + field.Repeated = true + case types.Struct: + if len(t.Name.Name) == 0 { + return errUnrecognizedType + } + field.Type, err = locator.ProtoTypeFor(t) + field.Nullable = false + default: + return errUnrecognizedType + } + return err +} + +// protobufTagToField extracts information from an existing protobuf tag +func protobufTagToField(tag string, field *protoField, m types.Member, t *types.Type, localPackage types.Name) error { + if len(tag) == 0 || tag == "-" { + return nil + } + + // protobuf:"bytes,3,opt,name=Id,customtype=github.com/gogo/protobuf/test.Uuid" + parts := strings.Split(tag, ",") + if len(parts) < 3 { + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, not enough segments\n", m.Name, t.Name) + } + protoTag, err := strconv.Atoi(parts[1]) + if err != nil { + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, field ID is %q which is not an integer: %v\n", m.Name, t.Name, parts[1], err) + } + field.Tag = protoTag + + // In general there is doesn't make sense to parse the protobuf tags to get the type, + // as all auto-generated once will have wire type "bytes", "varint" or "fixed64". + // However, sometimes we explicitly set them to have a custom serialization, e.g.: + // type Time struct { + // time.Time `protobuf:"Timestamp,1,req,name=time"` + // } + // to force the generator to use a given type (that we manually wrote serialization & + // deserialization methods for). 
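+	// parts[0] is either a standard wire type (left as-is) or the name of a custom type
+	// used for serialization, which is resolved into field.Type below.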
+ switch parts[0] { + case "varint", "fixed32", "fixed64", "bytes", "group": + default: + name := types.Name{} + if last := strings.LastIndex(parts[0], "."); last != -1 { + prefix := parts[0][:last] + name = types.Name{ + Name: parts[0][last+1:], + Package: prefix, + Path: strings.Replace(prefix, ".", "/", -1), + } + } else { + name = types.Name{ + Name: parts[0], + Package: localPackage.Package, + Path: localPackage.Path, + } + } + field.Type = &types.Type{ + Name: name, + Kind: types.Protobuf, + } + } + + protoExtra := make(map[string]string) + for i, extra := range parts[3:] { + parts := strings.SplitN(extra, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, tag %d should be key=value, got %q\n", m.Name, t.Name, i+4, extra) + } + switch parts[0] { + case "name": + protoExtra[parts[0]] = parts[1] + case "casttype", "castkey", "castvalue": + parts[0] = fmt.Sprintf("(gogoproto.%s)", parts[0]) + protoExtra[parts[0]] = strconv.Quote(parts[1]) + } + } + + field.Extras = protoExtra + if name, ok := protoExtra["name"]; ok { + field.Name = name + delete(protoExtra, "name") + } + + return nil +} + +func membersToFields(locator ProtobufLocator, t *types.Type, localPackage types.Name, omitFieldTypes map[types.Name]struct{}) ([]protoField, error) { + fields := []protoField{} + + for _, m := range t.Members { + if namer.IsPrivateGoName(m.Name) { + // skip private fields + continue + } + if _, ok := omitFieldTypes[types.Name{Name: m.Type.Name.Name, Package: m.Type.Name.Package}]; ok { + continue + } + tags := reflect.StructTag(m.Tags) + field := protoField{ + LocalPackage: localPackage, + + Tag: -1, + Extras: make(map[string]string), + } + + protobufTag := tags.Get("protobuf") + if protobufTag == "-" { + continue + } + + if err := protobufTagToField(protobufTag, &field, m, t, localPackage); err != nil { + return nil, err + } + + // extract information from JSON field tag + if tag := tags.Get("json"); len(tag) > 0 { + parts := strings.Split(tag, ",") + if len(field.Name) == 0 && len(parts[0]) != 0 { + field.Name = parts[0] + } + if field.Tag == -1 && field.Name == "-" { + continue + } + } + + if field.Type == nil { + if err := memberTypeToProtobufField(locator, &field, m.Type); err != nil { + return nil, fmt.Errorf("unable to embed type %q as field %q in %q: %v", m.Type, field.Name, t.Name, err) + } + } + if len(field.Name) == 0 { + field.Name = namer.IL(m.Name) + } + + if field.Map && field.Repeated { + // maps cannot be repeated + field.Repeated = false + field.Nullable = true + } + + if !field.Nullable { + field.Extras["(gogoproto.nullable)"] = "false" + } + if (field.Type.Name.Name == "bytes" && field.Type.Name.Package == "") || (field.Repeated && field.Type.Name.Package == "" && namer.IsPrivateGoName(field.Type.Name.Name)) { + delete(field.Extras, "(gogoproto.nullable)") + } + if field.Name != m.Name { + field.Extras["(gogoproto.customname)"] = strconv.Quote(m.Name) + } + field.CommentLines = m.CommentLines + fields = append(fields, field) + } + + // assign tags + highest := 0 + byTag := make(map[int]*protoField) + // fields are in Go struct order, which we preserve + for i := range fields { + field := &fields[i] + tag := field.Tag + if tag != -1 { + if existing, ok := byTag[tag]; ok { + return nil, fmt.Errorf("field %q and %q both have tag %d", field.Name, existing.Name, tag) + } + byTag[tag] = field + } + if tag > highest { + highest = tag + } + } + // starting from the highest observed tag, assign new field tags + for i := range fields { + field := 
&fields[i] + if field.Tag != -1 { + continue + } + highest++ + field.Tag = highest + byTag[field.Tag] = field + } + return fields, nil +} + +func genComment(out io.Writer, lines []string, indent string) { + for { + l := len(lines) + if l == 0 || len(lines[l-1]) != 0 { + break + } + lines = lines[:l-1] + } + for _, c := range lines { + if len(c) == 0 { + fmt.Fprintf(out, "%s//\n", indent) // avoid trailing whitespace + continue + } + fmt.Fprintf(out, "%s// %s\n", indent, c) + } +} + +func formatProtoFile(source []byte) ([]byte, error) { + // TODO; Is there any protobuf formatter? + return source, nil +} + +func assembleProtoFile(w io.Writer, f *generator.File) { + w.Write(f.Header) + + fmt.Fprint(w, "syntax = 'proto2';\n\n") + + if len(f.PackageName) > 0 { + fmt.Fprintf(w, "package %s;\n\n", f.PackageName) + } + + if len(f.Imports) > 0 { + imports := []string{} + for i := range f.Imports { + imports = append(imports, i) + } + sort.Strings(imports) + for _, s := range imports { + fmt.Fprintf(w, "import %q;\n", s) + } + fmt.Fprint(w, "\n") + } + + if f.Vars.Len() > 0 { + fmt.Fprintf(w, "%s\n", f.Vars.String()) + } + + w.Write(f.Body.Bytes()) +} + +func NewProtoFile() *generator.DefaultFileType { + return &generator.DefaultFileType{ + Format: formatProtoFile, + Assemble: assembleProtoFile, + } +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go new file mode 100644 index 00000000..08a991b1 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +type ImportTracker struct { + namer.DefaultImportTracker +} + +func NewImportTracker(local types.Name, typesToAdd ...*types.Type) *ImportTracker { + tracker := namer.NewDefaultImportTracker(local) + tracker.IsInvalidType = func(t *types.Type) bool { return t.Kind != types.Protobuf } + tracker.LocalName = func(name types.Name) string { return name.Package } + tracker.PrintImport = func(path, name string) string { return path } + + tracker.AddTypes(typesToAdd...) + return &ImportTracker{ + DefaultImportTracker: tracker, + } +} + +// AddNullable ensures that support for the nullable Gogo-protobuf extension is added. +func (tracker *ImportTracker) AddNullable() { + tracker.AddType(&types.Type{ + Kind: types.Protobuf, + Name: types.Name{ + Name: "nullable", + Package: "gogoproto", + Path: "github.com/gogo/protobuf/gogoproto/gogo.proto", + }, + }) +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go new file mode 100644 index 00000000..e3b21c67 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go @@ -0,0 +1,208 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "fmt" + "reflect" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +type localNamer struct { + localPackage types.Name +} + +func (n localNamer) Name(t *types.Type) string { + if t.Key != nil && t.Elem != nil { + return fmt.Sprintf("map<%s, %s>", n.Name(t.Key), n.Name(t.Elem)) + } + if len(n.localPackage.Package) != 0 && n.localPackage.Package == t.Name.Package { + return t.Name.Name + } + return t.Name.String() +} + +type protobufNamer struct { + packages []*protobufPackage + packagesByPath map[string]*protobufPackage +} + +func NewProtobufNamer() *protobufNamer { + return &protobufNamer{ + packagesByPath: make(map[string]*protobufPackage), + } +} + +func (n *protobufNamer) Name(t *types.Type) string { + if t.Kind == types.Map { + return fmt.Sprintf("map<%s, %s>", n.Name(t.Key), n.Name(t.Elem)) + } + return t.Name.String() +} + +func (n *protobufNamer) List() []generator.Package { + packages := make([]generator.Package, 0, len(n.packages)) + for i := range n.packages { + packages = append(packages, n.packages[i]) + } + return packages +} + +func (n *protobufNamer) Add(p *protobufPackage) { + if _, ok := n.packagesByPath[p.PackagePath]; !ok { + n.packagesByPath[p.PackagePath] = p + n.packages = append(n.packages, p) + } +} + +func (n *protobufNamer) GoNameToProtoName(name types.Name) types.Name { + if p, ok := n.packagesByPath[name.Package]; ok { + return types.Name{ + Name: name.Name, + Package: p.PackageName, + Path: p.ImportPath(), + } + } + for _, p := range n.packages { + if _, ok := p.FilterTypes[name]; ok { + return types.Name{ + Name: name.Name, + Package: p.PackageName, + Path: p.ImportPath(), + } + } + } + return types.Name{Name: name.Name} +} + +func protoSafePackage(name string) string { + pkg := strings.Replace(name, "/", ".", -1) + return strings.Replace(pkg, "-", "_", -1) +} + +type typeNameSet map[types.Name]*protobufPackage + +// assignGoTypeToProtoPackage looks for Go and Protobuf types that are referenced by a type in +// a package. It will not recurse into protobuf types. 
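+// Types that are already owned by another package are added to p.Imports instead of
+// being claimed by this package.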
+func assignGoTypeToProtoPackage(p *protobufPackage, t *types.Type, local, global typeNameSet, optional map[types.Name]struct{}) { + newT, isProto := isFundamentalProtoType(t) + if isProto { + t = newT + } + if otherP, ok := global[t.Name]; ok { + if _, ok := local[t.Name]; !ok { + p.Imports.AddType(&types.Type{ + Kind: types.Protobuf, + Name: otherP.ProtoTypeName(), + }) + } + return + } + if t.Name.Package == p.PackagePath { + // Associate types only to their own package + global[t.Name] = p + } + if _, ok := local[t.Name]; ok { + return + } + // don't recurse into existing proto types + if isProto { + p.Imports.AddType(t) + return + } + + local[t.Name] = p + for _, m := range t.Members { + if namer.IsPrivateGoName(m.Name) { + continue + } + field := &protoField{} + tag := reflect.StructTag(m.Tags).Get("protobuf") + if tag == "-" { + continue + } + if err := protobufTagToField(tag, field, m, t, p.ProtoTypeName()); err == nil && field.Type != nil { + assignGoTypeToProtoPackage(p, field.Type, local, global, optional) + continue + } + assignGoTypeToProtoPackage(p, m.Type, local, global, optional) + } + // TODO: should methods be walked? + if t.Elem != nil { + assignGoTypeToProtoPackage(p, t.Elem, local, global, optional) + } + if t.Key != nil { + assignGoTypeToProtoPackage(p, t.Key, local, global, optional) + } + if t.Underlying != nil { + if t.Kind == types.Alias && isOptionalAlias(t) { + optional[t.Name] = struct{}{} + } + assignGoTypeToProtoPackage(p, t.Underlying, local, global, optional) + } +} + +// isTypeApplicableToProtobuf checks to see if a type is relevant for protobuf processing. +// Currently, it filters out functions and private types. +func isTypeApplicableToProtobuf(t *types.Type) bool { + // skip functions -- we don't care about them for protobuf + if t.Kind == types.Func || (t.Kind == types.DeclarationOf && t.Underlying.Kind == types.Func) { + return false + } + // skip private types + if namer.IsPrivateGoName(t.Name.Name) { + return false + } + + return true +} + +func (n *protobufNamer) AssignTypesToPackages(c *generator.Context) error { + global := make(typeNameSet) + for _, p := range n.packages { + local := make(typeNameSet) + optional := make(map[types.Name]struct{}) + p.Imports = NewImportTracker(p.ProtoTypeName()) + for _, t := range c.Order { + if t.Name.Package != p.PackagePath { + continue + } + if !isTypeApplicableToProtobuf(t) { + // skip types that we don't care about, like functions + continue + } + assignGoTypeToProtoPackage(p, t, local, global, optional) + } + p.FilterTypes = make(map[types.Name]struct{}) + p.LocalNames = make(map[string]struct{}) + p.OptionalTypeNames = make(map[string]struct{}) + for k, v := range local { + if v == p { + p.FilterTypes[k] = struct{}{} + p.LocalNames[k.Name] = struct{}{} + if _, ok := optional[k]; ok { + p.OptionalTypeNames[k.Name] = struct{}{} + } + } + } + } + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go new file mode 100644 index 00000000..bed4c3e3 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go @@ -0,0 +1,215 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "fmt" + "go/ast" + "log" + "os" + "path/filepath" + "reflect" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" +) + +func newProtobufPackage(packagePath, packageName string, generateAll bool, omitFieldTypes map[types.Name]struct{}) *protobufPackage { + pkg := &protobufPackage{ + DefaultPackage: generator.DefaultPackage{ + // The protobuf package name (foo.bar.baz) + PackageName: packageName, + // A path segment relative to the GOPATH root (foo/bar/baz) + PackagePath: packagePath, + HeaderText: []byte( + ` +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +`), + PackageDocumentation: []byte(fmt.Sprintf( + `// Package %s is an autogenerated protobuf IDL. +`, packageName)), + }, + GenerateAll: generateAll, + OmitFieldTypes: omitFieldTypes, + } + pkg.FilterFunc = pkg.filterFunc + pkg.GeneratorFunc = pkg.generatorFunc + return pkg +} + +// protobufPackage contains the protobuf implementation of Package. +type protobufPackage struct { + generator.DefaultPackage + + // If true, this package has been vendored into our source tree and thus can + // only be generated by changing the vendor tree. + Vendored bool + + // If true, generate protobuf serializations for all public types. + // If false, only generate protobuf serializations for structs that + // request serialization. + GenerateAll bool + + // A list of types to filter to; if not specified all types will be included. + FilterTypes map[types.Name]struct{} + + // If true, omit any gogoprotobuf extensions not defined as types. + OmitGogo bool + + // A list of field types that will be excluded from the output struct + OmitFieldTypes map[types.Name]struct{} + + // A list of names that this package exports + LocalNames map[string]struct{} + + // A list of type names in this package that will need marshaller rewriting + // to remove synthetic protobuf fields. 
+ OptionalTypeNames map[string]struct{} + + // A list of struct tags to generate onto named struct fields + StructTags map[string]map[string]string + + // An import tracker for this package + Imports *ImportTracker +} + +func (p *protobufPackage) Clean(outputBase string) error { + for _, s := range []string{p.ImportPath(), p.OutputPath()} { + if err := os.Remove(filepath.Join(outputBase, s)); err != nil && !os.IsNotExist(err) { + return err + } + } + return nil +} + +func (p *protobufPackage) ProtoTypeName() types.Name { + return types.Name{ + Name: p.Path(), // the go path "foo/bar/baz" + Package: p.Name(), // the protobuf package "foo.bar.baz" + Path: p.ImportPath(), // the path of the import to get the proto + } +} + +func (p *protobufPackage) filterFunc(c *generator.Context, t *types.Type) bool { + switch t.Kind { + case types.Func, types.Chan: + return false + case types.Struct: + if t.Name.Name == "struct{}" { + return false + } + case types.Builtin: + return false + case types.Alias: + if !isOptionalAlias(t) { + return false + } + case types.Slice, types.Array, types.Map: + return false + case types.Pointer: + return false + } + if _, ok := isFundamentalProtoType(t); ok { + return false + } + _, ok := p.FilterTypes[t.Name] + return ok +} + +func (p *protobufPackage) HasGoType(name string) bool { + _, ok := p.LocalNames[name] + return ok +} + +func (p *protobufPackage) OptionalTypeName(name string) bool { + _, ok := p.OptionalTypeNames[name] + return ok +} + +func (p *protobufPackage) ExtractGeneratedType(t *ast.TypeSpec) bool { + if !p.HasGoType(t.Name.Name) { + return false + } + + switch s := t.Type.(type) { + case *ast.StructType: + for i, f := range s.Fields.List { + if len(f.Tag.Value) == 0 { + continue + } + tag := strings.Trim(f.Tag.Value, "`") + protobufTag := reflect.StructTag(tag).Get("protobuf") + if len(protobufTag) == 0 { + continue + } + if len(f.Names) > 1 { + log.Printf("WARNING: struct %s field %d %s: defined multiple names but single protobuf tag", t.Name.Name, i, f.Names[0].Name) + // TODO hard error? 
+ } + if p.StructTags == nil { + p.StructTags = make(map[string]map[string]string) + } + m := p.StructTags[t.Name.Name] + if m == nil { + m = make(map[string]string) + p.StructTags[t.Name.Name] = m + } + m[f.Names[0].Name] = tag + } + default: + log.Printf("WARNING: unexpected Go AST type definition: %#v", t) + } + + return true +} + +func (p *protobufPackage) generatorFunc(c *generator.Context) []generator.Generator { + generators := []generator.Generator{} + + p.Imports.AddNullable() + + generators = append(generators, &genProtoIDL{ + DefaultGen: generator.DefaultGen{ + OptionalName: "generated", + }, + localPackage: types.Name{Package: p.PackageName, Path: p.PackagePath}, + localGoPackage: types.Name{Package: p.PackagePath, Name: p.GoPackageName()}, + imports: p.Imports, + generateAll: p.GenerateAll, + omitGogo: p.OmitGogo, + omitFieldTypes: p.OmitFieldTypes, + }) + return generators +} + +func (p *protobufPackage) GoPackageName() string { + return filepath.Base(p.PackagePath) +} + +func (p *protobufPackage) ImportPath() string { + return filepath.Join(p.PackagePath, "generated.proto") +} + +func (p *protobufPackage) OutputPath() string { + return filepath.Join(p.PackagePath, "generated.pb.go") +} + +var ( + _ = generator.Package(&protobufPackage{}) +) diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go new file mode 100644 index 00000000..3115bc68 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go @@ -0,0 +1,452 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "bytes" + "errors" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io/ioutil" + "os" + "reflect" + "strings" + + customreflect "k8s.io/code-generator/third_party/forked/golang/reflect" +) + +func rewriteFile(name string, header []byte, rewriteFn func(*token.FileSet, *ast.File) error) error { + fset := token.NewFileSet() + src, err := ioutil.ReadFile(name) + if err != nil { + return err + } + file, err := parser.ParseFile(fset, name, src, parser.DeclarationErrors|parser.ParseComments) + if err != nil { + return err + } + + if err := rewriteFn(fset, file); err != nil { + return err + } + + b := &bytes.Buffer{} + b.Write(header) + if err := printer.Fprint(b, fset, file); err != nil { + return err + } + + body, err := format.Source(b.Bytes()) + if err != nil { + return err + } + + f, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + if _, err := f.Write(body); err != nil { + return err + } + return f.Close() +} + +// ExtractFunc extracts information from the provided TypeSpec and returns true if the type should be +// removed from the destination file. 
+type ExtractFunc func(*ast.TypeSpec) bool + +// OptionalFunc returns true if the provided local name is a type that has protobuf.nullable=true +// and should have its marshal functions adjusted to remove the 'Items' accessor. +type OptionalFunc func(name string) bool + +func RewriteGeneratedGogoProtobufFile(name string, extractFn ExtractFunc, optionalFn OptionalFunc, header []byte) error { + return rewriteFile(name, header, func(fset *token.FileSet, file *ast.File) error { + cmap := ast.NewCommentMap(fset, file, file.Comments) + + // transform methods that point to optional maps or slices + for _, d := range file.Decls { + rewriteOptionalMethods(d, optionalFn) + } + + // remove types that are already declared + decls := []ast.Decl{} + for _, d := range file.Decls { + if dropExistingTypeDeclarations(d, extractFn) { + continue + } + if dropEmptyImportDeclarations(d) { + continue + } + decls = append(decls, d) + } + file.Decls = decls + + // remove unmapped comments + file.Comments = cmap.Filter(file).Comments() + return nil + }) +} + +// rewriteOptionalMethods makes specific mutations to marshaller methods that belong to types identified +// as being "optional" (they may be nil on the wire). This allows protobuf to serialize a map or slice and +// properly discriminate between empty and nil (which is not possible in protobuf). +// TODO: move into upstream gogo-protobuf once https://github.com/gogo/protobuf/issues/181 +// has agreement +func rewriteOptionalMethods(decl ast.Decl, isOptional OptionalFunc) { + switch t := decl.(type) { + case *ast.FuncDecl: + ident, ptr, ok := receiver(t) + if !ok { + return + } + + // correct initialization of the form `m.Field = &OptionalType{}` to + // `m.Field = OptionalType{}` + if t.Name.Name == "Unmarshal" { + ast.Walk(optionalAssignmentVisitor{fn: isOptional}, t.Body) + } + + if !isOptional(ident.Name) { + return + } + + switch t.Name.Name { + case "Unmarshal": + ast.Walk(&optionalItemsVisitor{}, t.Body) + case "MarshalTo", "Size", "String", "MarshalToSizedBuffer": + ast.Walk(&optionalItemsVisitor{}, t.Body) + fallthrough + case "Marshal": + // if the method has a pointer receiver, set it back to a normal receiver + if ptr { + t.Recv.List[0].Type = ident + } + } + } +} + +type optionalAssignmentVisitor struct { + fn OptionalFunc +} + +// Visit walks the provided node, transforming field initializations of the form +// m.Field = &OptionalType{} -> m.Field = OptionalType{} +func (v optionalAssignmentVisitor) Visit(n ast.Node) ast.Visitor { + switch t := n.(type) { + case *ast.AssignStmt: + if len(t.Lhs) == 1 && len(t.Rhs) == 1 { + if !isFieldSelector(t.Lhs[0], "m", "") { + return nil + } + unary, ok := t.Rhs[0].(*ast.UnaryExpr) + if !ok || unary.Op != token.AND { + return nil + } + composite, ok := unary.X.(*ast.CompositeLit) + if !ok || composite.Type == nil || len(composite.Elts) != 0 { + return nil + } + if ident, ok := composite.Type.(*ast.Ident); ok && v.fn(ident.Name) { + t.Rhs[0] = composite + } + } + return nil + } + return v +} + +type optionalItemsVisitor struct{} + +// Visit walks the provided node, looking for specific patterns to transform that match +// the effective outcome of turning struct{ map[x]y || []x } into map[x]y or []x. 
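+// For example, generated statements such as
+//     m.Items = append(m.Items, value)
+// are rewritten to
+//     *m = append(*m, value)
+// so the optional alias marshals directly as its underlying slice or map.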
+func (v *optionalItemsVisitor) Visit(n ast.Node) ast.Visitor { + switch t := n.(type) { + case *ast.RangeStmt: + if isFieldSelector(t.X, "m", "Items") { + t.X = &ast.Ident{Name: "m"} + } + case *ast.AssignStmt: + if len(t.Lhs) == 1 && len(t.Rhs) == 1 { + switch lhs := t.Lhs[0].(type) { + case *ast.IndexExpr: + if isFieldSelector(lhs.X, "m", "Items") { + lhs.X = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + default: + if isFieldSelector(t.Lhs[0], "m", "Items") { + t.Lhs[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + switch rhs := t.Rhs[0].(type) { + case *ast.CallExpr: + if ident, ok := rhs.Fun.(*ast.Ident); ok && ident.Name == "append" { + ast.Walk(v, rhs) + if len(rhs.Args) > 0 { + switch arg := rhs.Args[0].(type) { + case *ast.Ident: + if arg.Name == "m" { + rhs.Args[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + } + return nil + } + } + } + case *ast.IfStmt: + switch cond := t.Cond.(type) { + case *ast.BinaryExpr: + if cond.Op == token.EQL { + if isFieldSelector(cond.X, "m", "Items") && isIdent(cond.Y, "nil") { + cond.X = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + } + if t.Init != nil { + // Find form: + // if err := m[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + // return err + // } + switch s := t.Init.(type) { + case *ast.AssignStmt: + if call, ok := s.Rhs[0].(*ast.CallExpr); ok { + if sel, ok := call.Fun.(*ast.SelectorExpr); ok { + if x, ok := sel.X.(*ast.IndexExpr); ok { + // m[] -> (*m)[] + if sel2, ok := x.X.(*ast.SelectorExpr); ok { + if ident, ok := sel2.X.(*ast.Ident); ok && ident.Name == "m" { + x.X = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + // len(m.Items) -> len(*m) + if bin, ok := x.Index.(*ast.BinaryExpr); ok { + if call2, ok := bin.X.(*ast.CallExpr); ok && len(call2.Args) == 1 { + if isFieldSelector(call2.Args[0], "m", "Items") { + call2.Args[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + } + } + } + } + } + } + case *ast.IndexExpr: + if isFieldSelector(t.X, "m", "Items") { + t.X = &ast.Ident{Name: "m"} + return nil + } + case *ast.CallExpr: + changed := false + for i := range t.Args { + if isFieldSelector(t.Args[i], "m", "Items") { + t.Args[i] = &ast.Ident{Name: "m"} + changed = true + } + } + if changed { + return nil + } + } + return v +} + +func isFieldSelector(n ast.Expr, name, field string) bool { + s, ok := n.(*ast.SelectorExpr) + if !ok || s.Sel == nil || (field != "" && s.Sel.Name != field) { + return false + } + return isIdent(s.X, name) +} + +func isIdent(n ast.Expr, value string) bool { + ident, ok := n.(*ast.Ident) + return ok && ident.Name == value +} + +func receiver(f *ast.FuncDecl) (ident *ast.Ident, pointer bool, ok bool) { + if f.Recv == nil || len(f.Recv.List) != 1 { + return nil, false, false + } + switch t := f.Recv.List[0].Type.(type) { + case *ast.StarExpr: + identity, ok := t.X.(*ast.Ident) + if !ok { + return nil, false, false + } + return identity, true, true + case *ast.Ident: + return t, false, true + } + return nil, false, false +} + +// dropExistingTypeDeclarations removes any type declaration for which extractFn returns true. The function +// returns true if the entire declaration should be dropped. 
+func dropExistingTypeDeclarations(decl ast.Decl, extractFn ExtractFunc) bool { + switch t := decl.(type) { + case *ast.GenDecl: + if t.Tok != token.TYPE { + return false + } + specs := []ast.Spec{} + for _, s := range t.Specs { + switch spec := s.(type) { + case *ast.TypeSpec: + if extractFn(spec) { + continue + } + specs = append(specs, spec) + } + } + if len(specs) == 0 { + return true + } + t.Specs = specs + } + return false +} + +// dropEmptyImportDeclarations strips any generated but no-op imports from the generated code +// to prevent generation from being able to define side-effects. The function returns true +// if the entire declaration should be dropped. +func dropEmptyImportDeclarations(decl ast.Decl) bool { + switch t := decl.(type) { + case *ast.GenDecl: + if t.Tok != token.IMPORT { + return false + } + specs := []ast.Spec{} + for _, s := range t.Specs { + switch spec := s.(type) { + case *ast.ImportSpec: + if spec.Name != nil && spec.Name.Name == "_" { + continue + } + specs = append(specs, spec) + } + } + if len(specs) == 0 { + return true + } + t.Specs = specs + } + return false +} + +func RewriteTypesWithProtobufStructTags(name string, structTags map[string]map[string]string) error { + return rewriteFile(name, []byte{}, func(fset *token.FileSet, file *ast.File) error { + allErrs := []error{} + + // set any new struct tags + for _, d := range file.Decls { + if errs := updateStructTags(d, structTags, []string{"protobuf"}); len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + } + + if len(allErrs) > 0 { + var s string + for _, err := range allErrs { + s += err.Error() + "\n" + } + return errors.New(s) + } + return nil + }) +} + +func updateStructTags(decl ast.Decl, structTags map[string]map[string]string, toCopy []string) []error { + var errs []error + t, ok := decl.(*ast.GenDecl) + if !ok { + return nil + } + if t.Tok != token.TYPE { + return nil + } + + for _, s := range t.Specs { + spec, ok := s.(*ast.TypeSpec) + if !ok { + continue + } + typeName := spec.Name.Name + fieldTags, ok := structTags[typeName] + if !ok { + continue + } + st, ok := spec.Type.(*ast.StructType) + if !ok { + continue + } + + for i := range st.Fields.List { + f := st.Fields.List[i] + var name string + if len(f.Names) == 0 { + switch t := f.Type.(type) { + case *ast.Ident: + name = t.Name + case *ast.SelectorExpr: + name = t.Sel.Name + default: + errs = append(errs, fmt.Errorf("unable to get name for tag from struct %q, field %#v", spec.Name.Name, t)) + continue + } + } else { + name = f.Names[0].Name + } + value, ok := fieldTags[name] + if !ok { + continue + } + var tags customreflect.StructTags + if f.Tag != nil { + oldTags, err := customreflect.ParseStructTags(strings.Trim(f.Tag.Value, "`")) + if err != nil { + errs = append(errs, fmt.Errorf("unable to read struct tag from struct %q, field %q: %v", spec.Name.Name, name, err)) + continue + } + tags = oldTags + } + for _, name := range toCopy { + // don't overwrite existing tags + if tags.Has(name) { + continue + } + // append new tags + if v := reflect.StructTag(value).Get(name); len(v) > 0 { + tags = append(tags, customreflect.StructTag{Name: name, Value: v}) + } + } + if len(tags) == 0 { + continue + } + if f.Tag == nil { + f.Tag = &ast.BasicLit{} + } + f.Tag.Value = tags.String() + } + } + return errs +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go new file mode 100644 index 00000000..8e2a1917 --- /dev/null +++ 
b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if +// it exists, the value is boolean. If the tag did not exist, it returns +// false. +func extractBoolTagOrDie(key string, lines []string) bool { + val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + if err != nil { + klog.Fatal(err) + } + return val +} diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore b/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore new file mode 100644 index 00000000..a5c47b66 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore @@ -0,0 +1 @@ +import-boss diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/main.go b/vendor/k8s.io/code-generator/cmd/import-boss/main.go new file mode 100644 index 00000000..0080f01e --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/import-boss/main.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// import-boss enforces import restrictions in a given repository. +// +// When a directory is verified, import-boss looks for a file called +// ".import-restrictions". If this file is not found, parent directories will be +// recursively searched. +// +// If an ".import-restrictions" file is found, then all imports of the package +// are checked against each "rule" in the file. A rule consists of three parts: +// +// - A SelectorRegexp, to select the import paths that the rule applies to. +// +// - A list of AllowedPrefixes +// +// - A list of ForbiddenPrefixes +// +// An import is allowed if it matches at least one allowed prefix and does not +// match any forbidden prefix. An example file looks like this: +// +// { +// "Rules": [ +// { +// "SelectorRegexp": "k8s[.]io", +// "AllowedPrefixes": [ +// "k8s.io/gengo/examples", +// "k8s.io/kubernetes/third_party" +// ], +// "ForbiddenPrefixes": [ +// "k8s.io/kubernetes/pkg/third_party/deprecated" +// ] +// }, +// { +// "SelectorRegexp": "^unsafe$", +// "AllowedPrefixes": [ +// ], +// "ForbiddenPrefixes": [ +// "" +// ] +// } +// ] +// } +// +// Note the second block explicitly matches the unsafe package, and forbids it +// ("" is a prefix of everything). 
+package main + +import ( + "os" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/import-boss/generators" + + "k8s.io/klog" +) + +func main() { + klog.InitFlags(nil) + arguments := args.Default() + + // Override defaults. + arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + arguments.InputDirs = []string{ + "k8s.io/kubernetes/pkg/...", + "k8s.io/kubernetes/cmd/...", + "k8s.io/kubernetes/plugin/...", + } + pflag.CommandLine.BoolVar(&arguments.IncludeTestFiles, "include-test-files", false, "If true, include *_test.go files.") + + if err := arguments.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Errorf("Error: %v", err) + os.Exit(1) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go new file mode 100644 index 00000000..ba7f7209 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + codegenutil "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct { + VersionedClientSetPackage string + InternalClientSetPackage string + ListersPackage string + SingleDirectory bool +} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + SingleDirectory: false, + } + genericArgs.CustomArgs = customArgs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/informers") + customArgs.VersionedClientSetPackage = path.Join(pkg, "pkg/client/clientset/versioned") + customArgs.InternalClientSetPackage = path.Join(pkg, "pkg/client/clientset/internalversion") + customArgs.ListersPackage = path.Join(pkg, "pkg/client/listers") + } + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. 
+func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&ca.InternalClientSetPackage, "internal-clientset-package", ca.InternalClientSetPackage, "the full package name for the internal clientset to use") + fs.StringVar(&ca.VersionedClientSetPackage, "versioned-clientset-package", ca.VersionedClientSetPackage, "the full package name for the versioned clientset to use") + fs.StringVar(&ca.ListersPackage, "listers-package", ca.ListersPackage, "the full package name for the listers to use") + fs.BoolVar(&ca.SingleDirectory, "single-directory", ca.SingleDirectory, "if true, omit the intermediate \"internalversion\" and \"externalversions\" subdirectories") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + customArgs := genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + if len(customArgs.VersionedClientSetPackage) == 0 { + return fmt.Errorf("versioned clientset package cannot be empty") + } + if len(customArgs.ListersPackage) == 0 { + return fmt.Errorf("listers package cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go new file mode 100644 index 00000000..6e579310 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -0,0 +1,258 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// factoryGenerator produces a file of listers for a given GroupVersion and +// type. +type factoryGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + groupVersions map[string]clientgentypes.GroupVersions + gvGoNames map[string]string + clientSetPackage string + internalInterfacesPackage string + filtered bool +} + +var _ generator.Generator = &factoryGenerator{} + +func (g *factoryGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *factoryGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ return +} + +func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + klog.V(5).Infof("processing type %v", t) + + gvInterfaces := make(map[string]*types.Type) + gvNewFuncs := make(map[string]*types.Type) + for groupPkgName := range g.groupVersions { + gvInterfaces[groupPkgName] = c.Universe.Type(types.Name{Package: path.Join(g.outputPackage, groupPkgName), Name: "Interface"}) + gvNewFuncs[groupPkgName] = c.Universe.Function(types.Name{Package: path.Join(g.outputPackage, groupPkgName), Name: "New"}) + } + m := map[string]interface{}{ + "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "groupVersions": g.groupVersions, + "gvInterfaces": gvInterfaces, + "gvNewFuncs": gvNewFuncs, + "gvGoNames": g.gvGoNames, + "interfacesNewInformerFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "NewInformerFunc"}), + "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "informerFactoryInterface": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "clientSetInterface": c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}), + "reflectType": c.Universe.Type(reflectType), + "runtimeObject": c.Universe.Type(runtimeObject), + "schemaGroupVersionResource": c.Universe.Type(schemaGroupVersionResource), + "syncMutex": c.Universe.Type(syncMutex), + "timeDuration": c.Universe.Type(timeDuration), + "namespaceAll": c.Universe.Type(metav1NamespaceAll), + "object": c.Universe.Type(metav1Object), + } + + sw.Do(sharedInformerFactoryStruct, m) + sw.Do(sharedInformerFactoryInterface, m) + + return sw.Error() +} + +var sharedInformerFactoryStruct = ` +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client {{.clientSetInterface|raw}} + namespace string + tweakListOptions {{.interfacesTweakListOptionsFunc|raw}} + lock {{.syncMutex|raw}} + defaultResync {{.timeDuration|raw}} + customResync map[{{.reflectType|raw}}]{{.timeDuration|raw}} + + informers map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}} + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[{{.reflectType|raw}}]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[{{.object|raw}}]{{.timeDuration|raw}}) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. 
+func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, namespace string, tweakListOptions {{.interfacesTweakListOptionsFunc|raw}}) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}}), + startedInformers: make(map[{{.reflectType|raw}}]bool), + customResync: make(map[{{.reflectType|raw}}]{{.timeDuration|raw}}), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func()map[reflect.Type]cache.SharedIndexInformer{ + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj {{.runtimeObject|raw}}, newFunc {{.interfacesNewInformerFunc|raw}}) {{.cacheSharedIndexInformer|raw}} { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +` + +var sharedInformerFactoryInterface = ` +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + {{.informerFactoryInterface|raw}} + ForResource(resource {{.schemaGroupVersionResource|raw}}) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + {{$gvInterfaces := .gvInterfaces}} + {{$gvGoNames := .gvGoNames}} + {{range $groupName, $group := .groupVersions}}{{index $gvGoNames $groupName}}() {{index $gvInterfaces $groupName|raw}} + {{end}} +} + +{{$gvNewFuncs := .gvNewFuncs}} +{{$gvGoNames := .gvGoNames}} +{{range $groupPkgName, $group := .groupVersions}} +func (f *sharedInformerFactory) {{index $gvGoNames $groupPkgName}}() {{index $gvInterfaces $groupPkgName|raw}} { + return {{index $gvNewFuncs $groupPkgName|raw}}(f, f.namespace, f.tweakListOptions) +} +{{end}} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go new file mode 100644 index 00000000..fc0668c5 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go @@ -0,0 +1,90 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for +// informer registration +type factoryInterfaceGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + clientSetPackage string + filtered bool +} + +var _ generator.Generator = &factoryInterfaceGenerator{} + +func (g *factoryInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *factoryInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *factoryInterfaceGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ return +} + +func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + klog.V(5).Infof("processing type %v", t) + + m := map[string]interface{}{ + "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "clientSetPackage": c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}), + "runtimeObject": c.Universe.Type(runtimeObject), + "timeDuration": c.Universe.Type(timeDuration), + "v1ListOptions": c.Universe.Type(v1ListOptions), + } + + sw.Do(externalSharedInformerFactoryInterface, m) + + return sw.Error() +} + +var externalSharedInformerFactoryInterface = ` +// NewInformerFunc takes {{.clientSetPackage|raw}} and {{.timeDuration|raw}} to return a SharedIndexInformer. +type NewInformerFunc func({{.clientSetPackage|raw}}, {{.timeDuration|raw}}) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj {{.runtimeObject|raw}}, newFunc NewInformerFunc) {{.cacheSharedIndexInformer|raw}} +} + +// TweakListOptionsFunc is a function that transforms a {{.v1ListOptions|raw}}. +type TweakListOptionsFunc func(*{{.v1ListOptions|raw}}) +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go new file mode 100644 index 00000000..cad90799 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -0,0 +1,184 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "sort" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genericGenerator generates the generic informer. 
+type genericGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + groupVersions map[string]clientgentypes.GroupVersions + groupGoNames map[string]string + typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type + filtered bool +} + +var _ generator.Generator = &genericGenerator{} + +func (g *genericGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), + "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "resource": codegennamer.NewTagOverrideNamer("resourceName", namer.NewAllLowercasePluralNamer(pluralExceptions)), + } +} + +func (g *genericGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + imports = append(imports, "fmt") + return +} + +type group struct { + GroupGoName string + Name string + Versions []*version +} + +type groupSort []group + +func (g groupSort) Len() int { return len(g) } +func (g groupSort) Less(i, j int) bool { return strings.ToLower(g[i].Name) < strings.ToLower(g[j].Name) } +func (g groupSort) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +type version struct { + Name string + GoName string + Resources []*types.Type +} + +type versionSort []*version + +func (v versionSort) Len() int { return len(v) } +func (v versionSort) Less(i, j int) bool { + return strings.ToLower(v[i].Name) < strings.ToLower(v[j].Name) +} +func (v versionSort) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + groups := []group{} + schemeGVs := make(map[*version]*types.Type) + + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + for groupPackageName, groupVersions := range g.groupVersions { + group := group{ + GroupGoName: g.groupGoNames[groupPackageName], + Name: groupVersions.Group.NonEmpty(), + Versions: []*version{}, + } + for _, v := range groupVersions.Versions { + gv := clientgentypes.GroupVersion{Group: groupVersions.Group, Version: v.Version} + version := &version{ + Name: v.Version.NonEmpty(), + GoName: namer.IC(v.Version.NonEmpty()), + Resources: orderer.OrderTypes(g.typesForGroupVersion[gv]), + } + func() { + schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"}) + }() + group.Versions = append(group.Versions, version) + } + sort.Sort(versionSort(group.Versions)) + groups = append(groups, group) + } + sort.Sort(groupSort(groups)) + + m := map[string]interface{}{ + "cacheGenericLister": c.Universe.Type(cacheGenericLister), + "cacheNewGenericLister": c.Universe.Function(cacheNewGenericLister), + "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "groups": groups, + "schemeGVs": schemeGVs, + "schemaGroupResource": c.Universe.Type(schemaGroupResource), + "schemaGroupVersionResource": c.Universe.Type(schemaGroupVersionResource), + } + + sw.Do(genericInformer, m) + sw.Do(forResource, m) + + return sw.Error() +} + +var genericInformer = ` +// GenericInformer is type of SharedIndexInformer which will locate and 
delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() {{.cacheSharedIndexInformer|raw}} + Lister() {{.cacheGenericLister|raw}} +} + +type genericInformer struct { + informer {{.cacheSharedIndexInformer|raw}} + resource {{.schemaGroupResource|raw}} +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() {{.cacheSharedIndexInformer|raw}} { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() {{.cacheGenericLister|raw}} { + return {{.cacheNewGenericLister|raw}}(f.Informer().GetIndexer(), f.resource) +} +` + +var forResource = ` +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResource|raw}}) (GenericInformer, error) { + switch resource { + {{range $group := .groups -}}{{$GroupGoName := .GroupGoName -}} + {{range $version := .Versions -}} + // Group={{$group.Name}}, Version={{.Name}} + {{range .Resources -}} + case {{index $.schemeGVs $version|raw}}.WithResource("{{.|resource}}"): + return &genericInformer{resource: resource.GroupResource(), informer: f.{{$GroupGoName}}().{{$version.GoName}}().{{.|publicPlural}}().Informer()}, nil + {{end}} + {{end}} + {{end -}} + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go new file mode 100644 index 00000000..0bba93c4 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path/filepath" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// groupInterfaceGenerator generates the per-group interface file. +type groupInterfaceGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + groupVersions clientgentypes.GroupVersions + filtered bool + internalInterfacesPackage string +} + +var _ generator.Generator = &groupInterfaceGenerator{} + +func (g *groupInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *groupInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *groupInterfaceGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ return +} + +type versionData struct { + Name string + Interface *types.Type + New *types.Type +} + +func (g *groupInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + versions := make([]versionData, 0, len(g.groupVersions.Versions)) + for _, version := range g.groupVersions.Versions { + gv := clientgentypes.GroupVersion{Group: g.groupVersions.Group, Version: version.Version} + versionPackage := filepath.Join(g.outputPackage, strings.ToLower(gv.Version.NonEmpty())) + iface := c.Universe.Type(types.Name{Package: versionPackage, Name: "Interface"}) + versions = append(versions, versionData{ + Name: namer.IC(version.Version.NonEmpty()), + Interface: iface, + New: c.Universe.Function(types.Name{Package: versionPackage, Name: "New"}), + }) + } + m := map[string]interface{}{ + "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "versions": versions, + } + + sw.Do(groupTemplate, m) + + return sw.Error() +} + +var groupTemplate = ` +// Interface provides access to each of this group's versions. +type Interface interface { + $range .versions -$ + // $.Name$ provides access to shared informers for resources in $.Name$. + $.Name$() $.Interface|raw$ + $end$ +} + +type group struct { + factory $.interfacesSharedInformerFactory|raw$ + namespace string + tweakListOptions $.interfacesTweakListOptionsFunc|raw$ +} + +// New returns a new Interface. +func New(f $.interfacesSharedInformerFactory|raw$, namespace string, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +$range .versions$ +// $.Name$ returns a new $.Interface|raw$. +func (g *group) $.Name$() $.Interface|raw$ { + return $.New|raw$(g.factory, g.namespace, g.tweakListOptions) +} +$end$ +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go new file mode 100644 index 00000000..9204d621 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go @@ -0,0 +1,186 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "io" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + + "k8s.io/klog" +) + +// informerGenerator produces a file of listers for a given GroupVersion and +// type. 
+type informerGenerator struct { + generator.DefaultGen + outputPackage string + groupPkgName string + groupVersion clientgentypes.GroupVersion + groupGoName string + typeToGenerate *types.Type + imports namer.ImportTracker + clientSetPackage string + listersPackage string + internalInterfacesPackage string +} + +var _ generator.Generator = &informerGenerator{} + +func (g *informerGenerator) Filter(c *generator.Context, t *types.Type) bool { + return t == g.typeToGenerate +} + +func (g *informerGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *informerGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + return +} + +func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + klog.V(5).Infof("processing type %v", t) + + listerPackage := fmt.Sprintf("%s/%s/%s", g.listersPackage, g.groupPkgName, strings.ToLower(g.groupVersion.Version.NonEmpty())) + clientSetInterface := c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}) + informerFor := "InformerFor" + + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + + m := map[string]interface{}{ + "apiScheme": c.Universe.Type(apiScheme), + "cacheIndexers": c.Universe.Type(cacheIndexers), + "cacheListWatch": c.Universe.Type(cacheListWatch), + "cacheMetaNamespaceIndexFunc": c.Universe.Function(cacheMetaNamespaceIndexFunc), + "cacheNamespaceIndex": c.Universe.Variable(cacheNamespaceIndex), + "cacheNewSharedIndexInformer": c.Universe.Function(cacheNewSharedIndexInformer), + "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "clientSetInterface": clientSetInterface, + "group": namer.IC(g.groupGoName), + "informerFor": informerFor, + "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "listOptions": c.Universe.Type(listOptions), + "lister": c.Universe.Type(types.Name{Package: listerPackage, Name: t.Name.Name + "Lister"}), + "namespaceAll": c.Universe.Type(metav1NamespaceAll), + "namespaced": !tags.NonNamespaced, + "newLister": c.Universe.Function(types.Name{Package: listerPackage, Name: "New" + t.Name.Name + "Lister"}), + "runtimeObject": c.Universe.Type(runtimeObject), + "timeDuration": c.Universe.Type(timeDuration), + "type": t, + "v1ListOptions": c.Universe.Type(v1ListOptions), + "version": namer.IC(g.groupVersion.Version.String()), + "watchInterface": c.Universe.Type(watchInterface), + } + + sw.Do(typeInformerInterface, m) + sw.Do(typeInformerStruct, m) + sw.Do(typeInformerPublicConstructor, m) + sw.Do(typeFilteredInformerPublicConstructor, m) + sw.Do(typeInformerConstructor, m) + sw.Do(typeInformerInformer, m) + sw.Do(typeInformerLister, m) + + return sw.Error() +} + +var typeInformerInterface = ` +// $.type|public$Informer provides access to a shared informer and lister for +// $.type|publicPlural$. 
+type $.type|public$Informer interface { + Informer() $.cacheSharedIndexInformer|raw$ + Lister() $.lister|raw$ +} +` + +var typeInformerStruct = ` +type $.type|private$Informer struct { + factory $.interfacesSharedInformerFactory|raw$ + tweakListOptions $.interfacesTweakListOptionsFunc|raw$ + $if .namespaced$namespace string$end$ +} +` + +var typeInformerPublicConstructor = ` +// New$.type|public$Informer constructs a new informer for $.type|public$ type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func New$.type|public$Informer(client $.clientSetInterface|raw$$if .namespaced$, namespace string$end$, resyncPeriod $.timeDuration|raw$, indexers $.cacheIndexers|raw$) $.cacheSharedIndexInformer|raw$ { + return NewFiltered$.type|public$Informer(client$if .namespaced$, namespace$end$, resyncPeriod, indexers, nil) +} +` + +var typeFilteredInformerPublicConstructor = ` +// NewFiltered$.type|public$Informer constructs a new informer for $.type|public$ type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFiltered$.type|public$Informer(client $.clientSetInterface|raw$$if .namespaced$, namespace string$end$, resyncPeriod $.timeDuration|raw$, indexers $.cacheIndexers|raw$, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) $.cacheSharedIndexInformer|raw$ { + return $.cacheNewSharedIndexInformer|raw$( + &$.cacheListWatch|raw${ + ListFunc: func(options $.v1ListOptions|raw$) ($.runtimeObject|raw$, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).List(options) + }, + WatchFunc: func(options $.v1ListOptions|raw$) ($.watchInterface|raw$, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).Watch(options) + }, + }, + &$.type|raw${}, + resyncPeriod, + indexers, + ) +} +` + +var typeInformerConstructor = ` +func (f *$.type|private$Informer) defaultInformer(client $.clientSetInterface|raw$, resyncPeriod $.timeDuration|raw$) $.cacheSharedIndexInformer|raw$ { + return NewFiltered$.type|public$Informer(client$if .namespaced$, f.namespace$end$, resyncPeriod, $.cacheIndexers|raw${$.cacheNamespaceIndex|raw$: $.cacheMetaNamespaceIndexFunc|raw$}, f.tweakListOptions) +} +` + +var typeInformerInformer = ` +func (f *$.type|private$Informer) Informer() $.cacheSharedIndexInformer|raw$ { + return f.factory.$.informerFor$(&$.type|raw${}, f.defaultInformer) +} +` + +var typeInformerLister = ` +func (f *$.type|private$Informer) Lister() $.lister|raw$ { + return $.newLister|raw$(f.Informer().GetIndexer()) +} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go new file mode 100644 index 00000000..e936e29f --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -0,0 +1,352 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path" + "path/filepath" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + informergenargs "k8s.io/code-generator/cmd/informer-gen/args" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), + "lowercaseSingular": &lowercaseSingularNamer{}, + } +} + +// lowercaseSingularNamer implements Namer +type lowercaseSingularNamer struct{} + +// Name returns t's name in all lowercase. +func (n *lowercaseSingularNamer) Name(t *types.Type) string { + return strings.ToLower(t.Name.Name) +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// objectMetaForPackage returns the type of ObjectMeta used by package p. +func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) { + generatingForPackage := false + for _, t := range p.Types { + if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient { + continue + } + generatingForPackage = true + for _, member := range t.Members { + if member.Name == "ObjectMeta" { + return member.Type, isInternal(member), nil + } + } + } + if generatingForPackage { + return nil, false, fmt.Errorf("unable to find ObjectMeta for any types in package %s", p.Path) + } + return nil, false, nil +} + +// isInternal returns true if the tags for a member do not contain a json tag +func isInternal(m types.Member) bool { + return !strings.Contains(m.Tags, "json") +} + +func packageForInternalInterfaces(base string) string { + return filepath.Join(base, "internalinterfaces") +} + +func vendorless(p string) string { + if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { + return p[pos+len("/vendor/"):] + } + return p +} + +// Packages makes the client package definition. 
+func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs) + if !ok { + klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) + } + + internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) + externalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) + if !customArgs.SingleDirectory { + internalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "internalversion") + externalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "externalversions") + } + + var packageList generator.Packages + typesForGroupVersion := make(map[clientgentypes.GroupVersion][]*types.Type) + + externalGroupVersions := make(map[string]clientgentypes.GroupVersions) + internalGroupVersions := make(map[string]clientgentypes.GroupVersions) + groupGoNames := make(map[string]string) + for _, inputDir := range arguments.InputDirs { + p := context.Universe.Package(vendorless(inputDir)) + + objectMeta, internal, err := objectMetaForPackage(p) + if err != nil { + klog.Fatal(err) + } + if objectMeta == nil { + // no types in this package had genclient + continue + } + + var gv clientgentypes.GroupVersion + var targetGroupVersions map[string]clientgentypes.GroupVersions + + if internal { + lastSlash := strings.LastIndex(p.Path, "/") + if lastSlash == -1 { + klog.Fatalf("error constructing internal group version for package %q", p.Path) + } + gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) + targetGroupVersions = internalGroupVersions + } else { + parts := strings.Split(p.Path, "/") + gv.Group = clientgentypes.Group(parts[len(parts)-2]) + gv.Version = clientgentypes.Version(parts[len(parts)-1]) + targetGroupVersions = externalGroupVersions + } + groupPackageName := gv.Group.NonEmpty() + gvPackage := path.Clean(p.Path) + + // If there's a comment of the form "// +groupName=somegroup" or + // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the + // group when generating. + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + gv.Group = clientgentypes.Group(override[0]) + } + + // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as + // the Go group identifier in CamelCase. 
It defaults + groupGoNames[groupPackageName] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) + if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + groupGoNames[groupPackageName] = namer.IC(override[0]) + } + + var typesToGenerate []*types.Type + for _, t := range p.Types { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if !tags.GenerateClient || tags.NoVerbs || !tags.HasVerb("list") || !tags.HasVerb("watch") { + continue + } + + typesToGenerate = append(typesToGenerate, t) + + if _, ok := typesForGroupVersion[gv]; !ok { + typesForGroupVersion[gv] = []*types.Type{} + } + typesForGroupVersion[gv] = append(typesForGroupVersion[gv], t) + } + if len(typesToGenerate) == 0 { + continue + } + + groupVersionsEntry, ok := targetGroupVersions[groupPackageName] + if !ok { + groupVersionsEntry = clientgentypes.GroupVersions{ + PackageName: groupPackageName, + Group: gv.Group, + } + } + groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, clientgentypes.PackageVersion{Version: gv.Version, Package: gvPackage}) + targetGroupVersions[groupPackageName] = groupVersionsEntry + + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + typesToGenerate = orderer.OrderTypes(typesToGenerate) + + if internal { + packageList = append(packageList, versionPackage(internalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.InternalClientSetPackage, customArgs.ListersPackage)) + } else { + packageList = append(packageList, versionPackage(externalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.VersionedClientSetPackage, customArgs.ListersPackage)) + } + } + + if len(externalGroupVersions) != 0 { + packageList = append(packageList, factoryInterfacePackage(externalVersionPackagePath, boilerplate, customArgs.VersionedClientSetPackage)) + packageList = append(packageList, factoryPackage(externalVersionPackagePath, boilerplate, groupGoNames, externalGroupVersions, customArgs.VersionedClientSetPackage, typesForGroupVersion)) + for _, gvs := range externalGroupVersions { + packageList = append(packageList, groupPackage(externalVersionPackagePath, gvs, boilerplate)) + } + } + + if len(internalGroupVersions) != 0 { + packageList = append(packageList, factoryInterfacePackage(internalVersionPackagePath, boilerplate, customArgs.InternalClientSetPackage)) + packageList = append(packageList, factoryPackage(internalVersionPackagePath, boilerplate, groupGoNames, internalGroupVersions, customArgs.InternalClientSetPackage, typesForGroupVersion)) + for _, gvs := range internalGroupVersions { + packageList = append(packageList, groupPackage(internalVersionPackagePath, gvs, boilerplate)) + } + } + + return packageList +} + +func factoryPackage(basePackage string, boilerplate []byte, groupGoNames map[string]string, groupVersions map[string]clientgentypes.GroupVersions, clientSetPackage string, typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type) generator.Package { + return &generator.DefaultPackage{ + PackageName: filepath.Base(basePackage), + PackagePath: basePackage, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &factoryGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "factory", + }, + outputPackage: basePackage, + imports: generator.NewImportTracker(), + groupVersions: 
groupVersions, + clientSetPackage: clientSetPackage, + internalInterfacesPackage: packageForInternalInterfaces(basePackage), + gvGoNames: groupGoNames, + }) + + generators = append(generators, &genericGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "generic", + }, + outputPackage: basePackage, + imports: generator.NewImportTracker(), + groupVersions: groupVersions, + typesForGroupVersion: typesForGroupVersion, + groupGoNames: groupGoNames, + }) + + return generators + }, + } +} + +func factoryInterfacePackage(basePackage string, boilerplate []byte, clientSetPackage string) generator.Package { + packagePath := packageForInternalInterfaces(basePackage) + + return &generator.DefaultPackage{ + PackageName: filepath.Base(packagePath), + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &factoryInterfaceGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "factory_interfaces", + }, + outputPackage: packagePath, + imports: generator.NewImportTracker(), + clientSetPackage: clientSetPackage, + }) + + return generators + }, + } +} + +func groupPackage(basePackage string, groupVersions clientgentypes.GroupVersions, boilerplate []byte) generator.Package { + packagePath := filepath.Join(basePackage, groupVersions.PackageName) + groupPkgName := strings.Split(string(groupVersions.PackageName), ".")[0] + + return &generator.DefaultPackage{ + PackageName: groupPkgName, + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &groupInterfaceGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "interface", + }, + outputPackage: packagePath, + groupVersions: groupVersions, + imports: generator.NewImportTracker(), + internalInterfacesPackage: packageForInternalInterfaces(basePackage), + }) + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + } +} + +func versionPackage(basePackage string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, clientSetPackage, listersPackage string) generator.Package { + packagePath := filepath.Join(basePackage, groupPkgName, strings.ToLower(gv.Version.NonEmpty())) + + return &generator.DefaultPackage{ + PackageName: strings.ToLower(gv.Version.NonEmpty()), + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &versionInterfaceGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "interface", + }, + outputPackage: packagePath, + imports: generator.NewImportTracker(), + types: typesToGenerate, + internalInterfacesPackage: packageForInternalInterfaces(basePackage), + }) + + for _, t := range typesToGenerate { + generators = append(generators, &informerGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(t.Name.Name), + }, + outputPackage: packagePath, + groupPkgName: groupPkgName, + groupVersion: gv, + groupGoName: groupGoName, + typeToGenerate: t, + imports: generator.NewImportTracker(), + clientSetPackage: clientSetPackage, + listersPackage: listersPackage, + internalInterfacesPackage: 
packageForInternalInterfaces(basePackage), + }) + } + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + } +} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go new file mode 100644 index 00000000..27d4bd51 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import "k8s.io/gengo/types" + +var ( + apiScheme = types.Name{Package: "k8s.io/kubernetes/pkg/api/legacyscheme", Name: "Scheme"} + cacheGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "GenericLister"} + cacheIndexers = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "Indexers"} + cacheListWatch = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "ListWatch"} + cacheMetaNamespaceIndexFunc = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "MetaNamespaceIndexFunc"} + cacheNamespaceIndex = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NamespaceIndex"} + cacheNewGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewGenericLister"} + cacheNewSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewSharedIndexInformer"} + cacheSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "SharedIndexInformer"} + listOptions = types.Name{Package: "k8s.io/kubernetes/pkg/apis/core", Name: "ListOptions"} + reflectType = types.Name{Package: "reflect", Name: "Type"} + runtimeObject = types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Object"} + schemaGroupResource = types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupResource"} + schemaGroupVersionResource = types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"} + syncMutex = types.Name{Package: "sync", Name: "Mutex"} + timeDuration = types.Name{Package: "time", Name: "Duration"} + v1ListOptions = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"} + metav1NamespaceAll = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "NamespaceAll"} + metav1Object = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "Object"} + watchInterface = types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"} +) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go new file mode 100644 index 00000000..f80350c5 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// versionInterfaceGenerator generates the per-version interface file. +type versionInterfaceGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + types []*types.Type + filtered bool + internalInterfacesPackage string +} + +var _ generator.Generator = &versionInterfaceGenerator{} + +func (g *versionInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *versionInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *versionInterfaceGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + return +} + +func (g *versionInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + m := map[string]interface{}{ + "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "types": g.types, + } + + sw.Do(versionTemplate, m) + for _, typeDef := range g.types { + tags, err := util.ParseClientGenTags(typeDef.SecondClosestCommentLines) + if err != nil { + return err + } + m["namespaced"] = !tags.NonNamespaced + m["type"] = typeDef + sw.Do(versionFuncTemplate, m) + } + + return sw.Error() +} + +var versionTemplate = ` +// Interface provides access to all the informers in this group version. +type Interface interface { + $range .types -$ + // $.|publicPlural$ returns a $.|public$Informer. + $.|publicPlural$() $.|public$Informer + $end$ +} + +type version struct { + factory $.interfacesSharedInformerFactory|raw$ + namespace string + tweakListOptions $.interfacesTweakListOptionsFunc|raw$ +} + +// New returns a new Interface. +func New(f $.interfacesSharedInformerFactory|raw$, namespace string, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} +` + +var versionFuncTemplate = ` +// $.type|publicPlural$ returns a $.type|public$Informer. 
+func (v *version) $.type|publicPlural$() $.type|public$Informer { + return &$.type|private$Informer{factory: v.factory$if .namespaced$, namespace: v.namespace$end$, tweakListOptions: v.tweakListOptions} +} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go new file mode 100644 index 00000000..14f3e923 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/informer-gen/generators" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/informer-gen/args" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move out of informer-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated" + customArgs.VersionedClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + customArgs.InternalClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + customArgs.ListersPackage = "k8s.io/kubernetes/pkg/client/listers" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions b/vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions @@ -0,0 +1 @@ +{} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go new file mode 100644 index 00000000..34914ea8 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + codegenutil "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct{} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = customArgs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/listers") + } + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go new file mode 100644 index 00000000..dd45d774 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go @@ -0,0 +1,67 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// expansionGenerator produces a file for a expansion interfaces. +type expansionGenerator struct { + generator.DefaultGen + packagePath string + types []*types.Type +} + +// We only want to call GenerateType() once per group. +func (g *expansionGenerator) Filter(c *generator.Context, t *types.Type) bool { + return t == g.types[0] +} + +func (g *expansionGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + for _, t := range g.types { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if _, err := os.Stat(filepath.Join(g.packagePath, strings.ToLower(t.Name.Name+"_expansion.go"))); os.IsNotExist(err) { + sw.Do(expansionInterfaceTemplate, t) + if !tags.NonNamespaced { + sw.Do(namespacedExpansionInterfaceTemplate, t) + } + } + } + return sw.Error() +} + +var expansionInterfaceTemplate = ` +// $.|public$ListerExpansion allows custom methods to be added to +// $.|public$Lister. +type $.|public$ListerExpansion interface {} +` + +var namespacedExpansionInterfaceTemplate = ` +// $.|public$NamespaceListerExpansion allows custom methods to be added to +// $.|public$NamespaceLister. 
+type $.|public$NamespaceListerExpansion interface {} +` diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go new file mode 100644 index 00000000..c8ed5ad4 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go @@ -0,0 +1,371 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + + "k8s.io/klog" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), + "lowercaseSingular": &lowercaseSingularNamer{}, + } +} + +// lowercaseSingularNamer implements Namer +type lowercaseSingularNamer struct{} + +// Name returns t's name in all lowercase. +func (n *lowercaseSingularNamer) Name(t *types.Type) string { + return strings.ToLower(t.Name.Name) +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// Packages makes the client package definition. 
+func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + var packageList generator.Packages + for _, inputDir := range arguments.InputDirs { + p := context.Universe.Package(inputDir) + + objectMeta, internal, err := objectMetaForPackage(p) + if err != nil { + klog.Fatal(err) + } + if objectMeta == nil { + // no types in this package had genclient + continue + } + + var gv clientgentypes.GroupVersion + var internalGVPkg string + + if internal { + lastSlash := strings.LastIndex(p.Path, "/") + if lastSlash == -1 { + klog.Fatalf("error constructing internal group version for package %q", p.Path) + } + gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) + internalGVPkg = p.Path + } else { + parts := strings.Split(p.Path, "/") + gv.Group = clientgentypes.Group(parts[len(parts)-2]) + gv.Version = clientgentypes.Version(parts[len(parts)-1]) + + internalGVPkg = strings.Join(parts[0:len(parts)-1], "/") + } + groupPackageName := strings.ToLower(gv.Group.NonEmpty()) + + // If there's a comment of the form "// +groupName=somegroup" or + // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the + // group when generating. + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + gv.Group = clientgentypes.Group(strings.SplitN(override[0], ".", 2)[0]) + } + + var typesToGenerate []*types.Type + for _, t := range p.Types { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if !tags.GenerateClient || !tags.HasVerb("list") || !tags.HasVerb("get") { + continue + } + typesToGenerate = append(typesToGenerate, t) + } + if len(typesToGenerate) == 0 { + continue + } + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + typesToGenerate = orderer.OrderTypes(typesToGenerate) + + packagePath := filepath.Join(arguments.OutputPackagePath, groupPackageName, strings.ToLower(gv.Version.NonEmpty())) + packageList = append(packageList, &generator.DefaultPackage{ + PackageName: strings.ToLower(gv.Version.NonEmpty()), + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &expansionGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "expansion_generated", + }, + packagePath: filepath.Join(arguments.OutputBase, packagePath), + types: typesToGenerate, + }) + + for _, t := range typesToGenerate { + generators = append(generators, &listerGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(t.Name.Name), + }, + outputPackage: arguments.OutputPackagePath, + groupVersion: gv, + internalGVPkg: internalGVPkg, + typeToGenerate: t, + imports: generator.NewImportTracker(), + objectMeta: objectMeta, + }) + } + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("get") + }, + }) + } + + return packageList +} + +// objectMetaForPackage returns the type of ObjectMeta used by package p. +func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) { + generatingForPackage := false + for _, t := range p.Types { + // filter out types which dont have genclient. 
+ if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient { + continue + } + generatingForPackage = true + for _, member := range t.Members { + if member.Name == "ObjectMeta" { + return member.Type, isInternal(member), nil + } + } + } + if generatingForPackage { + return nil, false, fmt.Errorf("unable to find ObjectMeta for any types in package %s", p.Path) + } + return nil, false, nil +} + +// isInternal returns true if the tags for a member do not contain a json tag +func isInternal(m types.Member) bool { + return !strings.Contains(m.Tags, "json") +} + +// listerGenerator produces a file of listers for a given GroupVersion and +// type. +type listerGenerator struct { + generator.DefaultGen + outputPackage string + groupVersion clientgentypes.GroupVersion + internalGVPkg string + typeToGenerate *types.Type + imports namer.ImportTracker + objectMeta *types.Type +} + +var _ generator.Generator = &listerGenerator{} + +func (g *listerGenerator) Filter(c *generator.Context, t *types.Type) bool { + return t == g.typeToGenerate +} + +func (g *listerGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *listerGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + imports = append(imports, "k8s.io/apimachinery/pkg/api/errors") + imports = append(imports, "k8s.io/apimachinery/pkg/labels") + // for Indexer + imports = append(imports, "k8s.io/client-go/tools/cache") + return +} + +func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + klog.V(5).Infof("processing type %v", t) + m := map[string]interface{}{ + "Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}), + "type": t, + "objectMeta": g.objectMeta, + } + + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + + if tags.NonNamespaced { + sw.Do(typeListerInterface_NonNamespaced, m) + } else { + sw.Do(typeListerInterface, m) + } + + sw.Do(typeListerStruct, m) + sw.Do(typeListerConstructor, m) + sw.Do(typeLister_List, m) + + if tags.NonNamespaced { + sw.Do(typeLister_NonNamespacedGet, m) + return sw.Error() + } + + sw.Do(typeLister_NamespaceLister, m) + sw.Do(namespaceListerInterface, m) + sw.Do(namespaceListerStruct, m) + sw.Do(namespaceLister_List, m) + sw.Do(namespaceLister_Get, m) + + return sw.Error() +} + +var typeListerInterface = ` +// $.type|public$Lister helps list $.type|publicPlural$. +type $.type|public$Lister interface { + // List lists all $.type|publicPlural$ in the indexer. + List(selector labels.Selector) (ret []*$.type|raw$, err error) + // $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$. + $.type|publicPlural$(namespace string) $.type|public$NamespaceLister + $.type|public$ListerExpansion +} +` + +var typeListerInterface_NonNamespaced = ` +// $.type|public$Lister helps list $.type|publicPlural$. +type $.type|public$Lister interface { + // List lists all $.type|publicPlural$ in the indexer. + List(selector labels.Selector) (ret []*$.type|raw$, err error) + // Get retrieves the $.type|public$ from the index for a given name. 
+ Get(name string) (*$.type|raw$, error) + $.type|public$ListerExpansion +} +` + +var typeListerStruct = ` +// $.type|private$Lister implements the $.type|public$Lister interface. +type $.type|private$Lister struct { + indexer cache.Indexer +} +` + +var typeListerConstructor = ` +// New$.type|public$Lister returns a new $.type|public$Lister. +func New$.type|public$Lister(indexer cache.Indexer) $.type|public$Lister { + return &$.type|private$Lister{indexer: indexer} +} +` + +var typeLister_List = ` +// List lists all $.type|publicPlural$ in the indexer. +func (s *$.type|private$Lister) List(selector labels.Selector) (ret []*$.type|raw$, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*$.type|raw$)) + }) + return ret, err +} +` + +var typeLister_NamespaceLister = ` +// $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$. +func (s *$.type|private$Lister) $.type|publicPlural$(namespace string) $.type|public$NamespaceLister { + return $.type|private$NamespaceLister{indexer: s.indexer, namespace: namespace} +} +` + +var typeLister_NonNamespacedGet = ` +// Get retrieves the $.type|public$ from the index for a given name. +func (s *$.type|private$Lister) Get(name string) (*$.type|raw$, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name) + } + return obj.(*$.type|raw$), nil +} +` + +var namespaceListerInterface = ` +// $.type|public$NamespaceLister helps list and get $.type|publicPlural$. +type $.type|public$NamespaceLister interface { + // List lists all $.type|publicPlural$ in the indexer for a given namespace. + List(selector labels.Selector) (ret []*$.type|raw$, err error) + // Get retrieves the $.type|public$ from the indexer for a given namespace and name. + Get(name string) (*$.type|raw$, error) + $.type|public$NamespaceListerExpansion +} +` + +var namespaceListerStruct = ` +// $.type|private$NamespaceLister implements the $.type|public$NamespaceLister +// interface. +type $.type|private$NamespaceLister struct { + indexer cache.Indexer + namespace string +} +` + +var namespaceLister_List = ` +// List lists all $.type|publicPlural$ in the indexer for a given namespace. +func (s $.type|private$NamespaceLister) List(selector labels.Selector) (ret []*$.type|raw$, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*$.type|raw$)) + }) + return ret, err +} +` + +var namespaceLister_Get = ` +// Get retrieves the $.type|public$ from the indexer for a given namespace and name. +func (s $.type|private$NamespaceLister) Get(name string) (*$.type|raw$, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name) + } + return obj.(*$.type|raw$), nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go new file mode 100644 index 00000000..aca16b2b --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/lister-gen/generators" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/lister-gen/args" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move this out of lister-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go b/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go new file mode 100644 index 00000000..b1098c01 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package generates openAPI definition file to be used in open API spec generation on API servers. To generate +// definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. To +// exclude a type from a tagged package, add "+k8s:openapi-gen=false" tag to the type comment lines. + +package main + +import ( + "flag" + "log" + + generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" + "k8s.io/kube-openapi/pkg/generators" + + "github.com/spf13/pflag" + + "k8s.io/klog" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + log.Fatalf("Arguments validation error: %v", err) + } + + // Generates the code for the OpenAPIDefinitions. 
+ if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + log.Fatalf("OpenAPI code generation error: %v", err) + } +} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go new file mode 100644 index 00000000..2e3ab084 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "k8s.io/gengo/args" +) + +// NewDefaults returns default arguments for the generator. +func NewDefaults() *args.GeneratorArgs { + genericArgs := args.Default().WithoutDefaultFlagParsing() + genericArgs.OutputFileBaseName = "zz_generated.register" + return genericArgs +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go new file mode 100644 index 00000000..5186e421 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -0,0 +1,137 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "os" + "path" + "strings" + + "k8s.io/klog" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{} +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// Packages makes packages to generate. 
+func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + packages := generator.Packages{} + for _, inputDir := range arguments.InputDirs { + pkg := context.Universe.Package(inputDir) + internal, err := isInternal(pkg) + if err != nil { + klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) + continue + } + if internal { + klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) + continue + } + registerFileName := "register.go" + searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName) + if _, err := os.Stat(path.Join(searchPath)); err == nil { + klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) + continue + } else if err != nil && !os.IsNotExist(err) { + klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) + } + + gv := clientgentypes.GroupVersion{} + { + pathParts := strings.Split(pkg.Path, "/") + if len(pathParts) < 2 { + klog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) + continue + } + gv.Group = clientgentypes.Group(pathParts[len(pathParts)-2]) + gv.Version = clientgentypes.Version(pathParts[len(pathParts)-1]) + + // if there is a comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", + // extract the fully qualified API group name from it and overwrite the group inferred from the package path + if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil { + groupName := override[0] + klog.V(5).Infof("overriding the group name with = %s", groupName) + gv.Group = clientgentypes.Group(groupName) + } + } + + typesToRegister := []*types.Type{} + for _, t := range pkg.Types { + klog.V(5).Infof("considering type = %s", t.Name.String()) + for _, typeMember := range t.Members { + if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { + typesToRegister = append(typesToRegister, t) + } + } + } + + packages = append(packages, + &generator.DefaultPackage{ + PackageName: pkg.Name, + PackagePath: pkg.Path, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + ®isterExternalGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: arguments.OutputFileBaseName, + }, + gv: gv, + typesToGenerate: typesToRegister, + outputPackage: pkg.Path, + imports: generator.NewImportTracker(), + }, + } + }, + }) + } + + return packages +} + +// isInternal determines whether the given package +// contains the internal types or not +func isInternal(p *types.Package) (bool, error) { + for _, t := range p.Types { + for _, member := range t.Members { + if member.Name == "TypeMeta" { + return !strings.Contains(member.Tags, "json"), nil + } + } + } + return false, fmt.Errorf("unable to find TypeMeta for any types in package %s", p.Path) +} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go new file mode 100644 index 00000000..c831c575 --- /dev/null +++ 
b/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go @@ -0,0 +1,117 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "sort" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +type registerExternalGenerator struct { + generator.DefaultGen + outputPackage string + gv clientgentypes.GroupVersion + typesToGenerate []*types.Type + imports namer.ImportTracker +} + +var _ generator.Generator = ®isterExternalGenerator{} + +func (g *registerExternalGenerator) Filter(_ *generator.Context, _ *types.Type) bool { + return false +} + +func (g *registerExternalGenerator) Imports(c *generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +func (g *registerExternalGenerator) Namers(_ *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *registerExternalGenerator) Finalize(context *generator.Context, w io.Writer) error { + typesToGenerateOnlyNames := make([]string, len(g.typesToGenerate)) + for index, typeToGenerate := range g.typesToGenerate { + typesToGenerateOnlyNames[index] = typeToGenerate.Name.Name + } + + // sort the list of types to register, so that the generator produces stable output + sort.Strings(typesToGenerateOnlyNames) + + sw := generator.NewSnippetWriter(w, context, "$", "$") + m := map[string]interface{}{ + "groupName": g.gv.Group, + "version": g.gv.Version, + "types": typesToGenerateOnlyNames, + "addToGroupVersion": context.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "AddToGroupVersion"}), + "groupVersion": context.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GroupVersion"}), + } + sw.Do(registerExternalTypesTemplate, m) + return sw.Error() +} + +var registerExternalTypesTemplate = ` +// GroupName specifies the group name used to register the objects. +const GroupName = "$.groupName$" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = $.groupVersion|raw${Group: GroupName, Version: "$.version$"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "$.version$"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Depreciated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. 
The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + $range .types -$ + &$.${}, + $end$ + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + $.addToGroupVersion|raw$(scheme, SchemeGroupVersion) + return nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/main.go b/vendor/k8s.io/code-generator/cmd/register-gen/main.go new file mode 100644 index 00000000..30a175d8 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/register-gen/main.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/register-gen/args" + "k8s.io/code-generator/cmd/register-gen/generators" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +func main() { + klog.InitFlags(nil) + genericArgs := generatorargs.NewDefaults() + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + + pflag.Parse() + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore b/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore new file mode 100644 index 00000000..ffe6458c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore @@ -0,0 +1 @@ +set-gen diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/main.go b/vendor/k8s.io/code-generator/cmd/set-gen/main.go new file mode 100644 index 00000000..45694d4f --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/set-gen/main.go @@ -0,0 +1,56 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// set-gen is an example usage of gengo. 
+// +// Structs in the input directories with the below line in their comments will +// have sets generated for them. +// // +genset +// +// Any builtin type referenced anywhere in the input directories will have a +// set generated for it. +package main + +import ( + "os" + "path/filepath" + + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/set-gen/generators" + + "k8s.io/klog" +) + +func main() { + klog.InitFlags(nil) + arguments := args.Default() + + // Override defaults. + arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + arguments.InputDirs = []string{"k8s.io/kubernetes/pkg/util/sets/types"} + arguments.OutputPackagePath = "k8s.io/apimachinery/pkg/util/sets" + + if err := arguments.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Errorf("Error: %v", err) + os.Exit(1) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/code-of-conduct.md b/vendor/k8s.io/code-generator/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/vendor/k8s.io/code-generator/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh new file mode 100644 index 00000000..d82002dd --- /dev/null +++ b/vendor/k8s.io/code-generator/generate-groups.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# generate-groups generates everything for a project with external types only, e.g. a project based +# on CustomResourceDefinitions. + +if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then + cat <<EOF +Usage: $(basename "$0") <generators> <output-package> <apis-package> <groups-versions> ... + + <generators> the generators comma separated to run (deepcopy,defaulter,client,lister,informer) or "all". + <output-package> the output package name (e.g. github.com/example/project/pkg/generated). + <apis-package> the external types dir (e.g. github.com/example/api or github.com/example/project/pkg/apis). + <groups-versions> the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative + to <apis-package>. + ... arbitrary flags passed to all generator binaries. + + +Examples: + $(basename "$0") all github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" + $(basename "$0") deepcopy,client github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" +EOF + exit 0 +fi + +GENS="$1" +OUTPUT_PKG="$2" +APIS_PKG="$3" +GROUPS_WITH_VERSIONS="$4" +shift 4 + +( + # To support running this script from anywhere, we have to first cd into this directory + # so we can install the tools.
+ cd "$(dirname "${0}")" + go install ./cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} +) + +function codegen::join() { local IFS="$1"; shift; echo "$*"; } + +# enumerate group versions +FQ_APIS=() # e.g. k8s.io/api/apps/v1 +for GVs in ${GROUPS_WITH_VERSIONS}; do + IFS=: read -r G Vs <<<"${GVs}" + + # enumerate versions + for V in ${Vs//,/ }; do + FQ_APIS+=("${APIS_PKG}/${G}/${V}") + done +done + +if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then + echo "Generating deepcopy funcs" + "${GOPATH}/bin/deepcopy-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${APIS_PKG}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then + echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" + "${GOPATH}/bin/client-gen" --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then + echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" + "${GOPATH}/bin/lister-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/listers" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then + echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" + "${GOPATH}/bin/informer-gen" \ + --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \ + --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \ + --listers-package "${OUTPUT_PKG}/listers" \ + --output-package "${OUTPUT_PKG}/informers" \ + "$@" +fi diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh new file mode 100644 index 00000000..8c31d933 --- /dev/null +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# generate-internal-groups generates everything for a project with internal types, e.g. an +# user-provided API server based on k8s.io/apiserver. + +if [ "$#" -lt 5 ] || [ "${1}" == "--help" ]; then + cat <<EOF +Usage: $(basename "$0") <generators> <output-package> <internal-apis-package> <external-apis-package> <groups-versions> ... + + <generators> the generators comma separated to run (deepcopy,defaulter,conversion,client,lister,informer,openapi) or "all". + <output-package> the output package name (e.g. github.com/example/project/pkg/generated). + <internal-apis-package> the internal types dir (e.g. github.com/example/project/pkg/apis). + <external-apis-package> the external types dir (e.g. github.com/example/project/pkg/apis or githubcom/example/apis). + <groups-versions> the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative + to <external-apis-package>. + ... arbitrary flags passed to all generator binaries.
+ +Examples: + $(basename "$0") all github.com/example/project/pkg/client github.com/example/project/pkg/apis github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" + $(basename "$0") deepcopy,defaulter,conversion github.com/example/project/pkg/client github.com/example/project/pkg/apis github.com/example/project/apis "foo:v1 bar:v1alpha1,v1beta1" +EOF + exit 0 +fi + +GENS="$1" +OUTPUT_PKG="$2" +INT_APIS_PKG="$3" +EXT_APIS_PKG="$4" +GROUPS_WITH_VERSIONS="$5" +shift 5 + +go install ./"$(dirname "${0}")"/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen,openapi-gen} + +function codegen::join() { local IFS="$1"; shift; echo "$*"; } + +# enumerate group versions +ALL_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps k8s.io/api/apps/v1 +INT_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps +EXT_FQ_APIS=() # e.g. k8s.io/api/apps/v1 +for GVs in ${GROUPS_WITH_VERSIONS}; do + IFS=: read -r G Vs <<<"${GVs}" + + if [ -n "${INT_APIS_PKG}" ]; then + ALL_FQ_APIS+=("${INT_APIS_PKG}/${G}") + INT_FQ_APIS+=("${INT_APIS_PKG}/${G}") + fi + + # enumerate versions + for V in ${Vs//,/ }; do + ALL_FQ_APIS+=("${EXT_APIS_PKG}/${G}/${V}") + EXT_FQ_APIS+=("${EXT_APIS_PKG}/${G}/${V}") + done +done + +if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then + echo "Generating deepcopy funcs" + "${GOPATH}/bin/deepcopy-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${INT_APIS_PKG},${EXT_APIS_PKG}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "defaulter" <<<"${GENS}"; then + echo "Generating defaulters" + "${GOPATH}/bin/defaulter-gen" --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}")" -O zz_generated.defaults "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "conversion" <<<"${GENS}"; then + echo "Generating conversions" + "${GOPATH}/bin/conversion-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.conversion "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then + echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" + if [ -n "${INT_APIS_PKG}" ]; then + IFS=" " read -r -a APIS <<< "$(printf '%s/ ' "${INT_FQ_APIS[@]}")" + "${GOPATH}/bin/client-gen" --clientset-name "${CLIENTSET_NAME_INTERNAL:-internalversion}" --input-base "" --input "$(codegen::join , "${APIS[@]}")" --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@" + fi + "${GOPATH}/bin/client-gen" --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input "$(codegen::join , "${EXT_FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then + echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" + "${GOPATH}/bin/lister-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/listers" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then + echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" + "${GOPATH}/bin/informer-gen" \ + --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" \ + --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \ + --internal-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_INTERNAL:-internalversion}" \ + --listers-package "${OUTPUT_PKG}/listers" \ + --output-package "${OUTPUT_PKG}/informers" 
\ + "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "openapi" <<<"${GENS}"; then + echo "Generating OpenAPI definitions for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/openapi" + declare -a OPENAPI_EXTRA_PACKAGES + "${GOPATH}/bin/openapi-gen" \ + --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}" "${OPENAPI_EXTRA_PACKAGES[@]}")" \ + --input-dirs "k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version" \ + --output-package "${OUTPUT_PKG}/openapi" \ + -O zz_generated.openapi \ + "$@" +fi diff --git a/vendor/k8s.io/code-generator/go.mod b/vendor/k8s.io/code-generator/go.mod new file mode 100644 index 00000000..6317da60 --- /dev/null +++ b/vendor/k8s.io/code-generator/go.mod @@ -0,0 +1,25 @@ +// This is a generated file. Do not edit directly. + +module k8s.io/code-generator + +go 1.12 + +require ( + github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/go-openapi/jsonpointer v0.19.3 // indirect + github.com/go-openapi/spec v0.19.2 // indirect + github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/json-iterator/go v1.1.7 // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/spf13/pflag v1.0.5 + golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect + golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 // indirect + gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 + gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect + k8s.io/gengo v0.0.0-20190822140433-26a664648505 + k8s.io/klog v1.0.0 + k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31 // release-1.16 +) + +replace golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 diff --git a/vendor/k8s.io/code-generator/go.sum b/vendor/k8s.io/code-generator/go.sum new file mode 100644 index 00000000..d862c39f --- /dev/null +++ b/vendor/k8s.io/code-generator/go.sum @@ -0,0 +1,132 @@ +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/ghodss/yaml 
v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson 
v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 h1:PVCvyir09Xgta5zksNZDkrL+eSm/Y+gQxRG3IfqNQ3A= +golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31 h1:PsbYeEz2x7ll6JYUzBEG+DT78910DDTlvn5Ma10F5/E= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go new file mode 100644 index 00000000..fd8c3a85 --- /dev/null +++ b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// TagOverrideNamer is a namer which pulls names from a given tag, if specified, +// and otherwise falls back to a different namer. +type TagOverrideNamer struct { + tagName string + fallback namer.Namer +} + +// Name returns the tag value if it exists. It no tag was found the fallback namer will be used +func (n *TagOverrideNamer) Name(t *types.Type) string { + if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { + return nameOverride + } + + return n.fallback.Name(t) +} + +// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as +// the name, or falls back to another Namer if the tag is not present. +func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { + return &TagOverrideNamer{ + tagName: tagName, + fallback: fallback, + } +} + +// extractTag gets the comment-tags for the key. If the tag did not exist, it +// returns the empty string. +func extractTag(key string, lines []string) string { + val, present := types.ExtractCommentTags("+", lines)[key] + if !present || len(val) < 1 { + return "" + } + + return val[0] +} diff --git a/vendor/k8s.io/code-generator/pkg/util/build.go b/vendor/k8s.io/code-generator/pkg/util/build.go new file mode 100644 index 00000000..6ea8f52e --- /dev/null +++ b/vendor/k8s.io/code-generator/pkg/util/build.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + gobuild "go/build" + "path" + "path/filepath" + "reflect" + "strings" +) + +type empty struct{} + +// CurrentPackage returns the go package of the current directory, or "" if it cannot +// be derived from the GOPATH. +func CurrentPackage() string { + for _, root := range gobuild.Default.SrcDirs() { + if pkg, ok := hasSubdir(root, "."); ok { + return pkg + } + } + return "" +} + +func hasSubdir(root, dir string) (rel string, ok bool) { + // ensure a tailing separator to properly compare on word-boundaries + const sep = string(filepath.Separator) + root = filepath.Clean(root) + if !strings.HasSuffix(root, sep) { + root += sep + } + + // check whether root dir starts with root + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + + // cut off root + return filepath.ToSlash(dir[len(root):]), true +} + +// BoilerplatePath uses the boilerplate in code-generator by calculating the relative path to it. +func BoilerplatePath() string { + return path.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt") +} diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go new file mode 100644 index 00000000..67957ee3 --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go @@ -0,0 +1,91 @@ +//This package is copied from Go library reflect/type.go. +//The struct tag library provides no way to extract the list of struct tags, only +//a specific tag +package reflect + +import ( + "fmt" + + "strconv" + "strings" +) + +type StructTag struct { + Name string + Value string +} + +func (t StructTag) String() string { + return fmt.Sprintf("%s:%q", t.Name, t.Value) +} + +type StructTags []StructTag + +func (tags StructTags) String() string { + s := make([]string, 0, len(tags)) + for _, tag := range tags { + s = append(s, tag.String()) + } + return "`" + strings.Join(s, " ") + "`" +} + +func (tags StructTags) Has(name string) bool { + for i := range tags { + if tags[i].Name == name { + return true + } + } + return false +} + +// ParseStructTags returns the full set of fields in a struct tag in the order they appear in +// the struct tag. +func ParseStructTags(tag string) (StructTags, error) { + tags := StructTags{} + for tag != "" { + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. 
+ i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + break + } + name := string(tag[:i]) + tag = tag[i+1:] + + // Scan quoted string to find value. + i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + break + } + qvalue := string(tag[:i+1]) + tag = tag[i+1:] + + value, err := strconv.Unquote(qvalue) + if err != nil { + return nil, err + } + tags = append(tags, StructTag{Name: name, Value: value}) + } + return tags, nil +} diff --git a/vendor/k8s.io/code-generator/tools.go b/vendor/k8s.io/code-generator/tools.go new file mode 100644 index 00000000..7d13de5a --- /dev/null +++ b/vendor/k8s.io/code-generator/tools.go @@ -0,0 +1,35 @@ +// +build tools + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains code generation utilities +// This package imports things required by build scripts, to force `go mod` to see them as dependencies +package tools + +import ( + _ "k8s.io/code-generator/cmd/client-gen" + _ "k8s.io/code-generator/cmd/conversion-gen" + _ "k8s.io/code-generator/cmd/deepcopy-gen" + _ "k8s.io/code-generator/cmd/defaulter-gen" + _ "k8s.io/code-generator/cmd/go-to-protobuf" + _ "k8s.io/code-generator/cmd/import-boss" + _ "k8s.io/code-generator/cmd/informer-gen" + _ "k8s.io/code-generator/cmd/lister-gen" + _ "k8s.io/code-generator/cmd/openapi-gen" + _ "k8s.io/code-generator/cmd/register-gen" + _ "k8s.io/code-generator/cmd/set-gen" +) diff --git a/vendor/k8s.io/gengo/LICENSE b/vendor/k8s.io/gengo/LICENSE new file mode 100644 index 00000000..00b24011 --- /dev/null +++ b/vendor/k8s.io/gengo/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/gengo/args/args.go b/vendor/k8s.io/gengo/args/args.go new file mode 100644 index 00000000..7401098c --- /dev/null +++ b/vendor/k8s.io/gengo/args/args.go @@ -0,0 +1,209 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package args has common command-line flags for generation programs. 
+package args + +import ( + "bytes" + goflag "flag" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" + "k8s.io/gengo/types" + + "github.com/spf13/pflag" +) + +// Default returns a defaulted GeneratorArgs. You may change the defaults +// before calling AddFlags. +func Default() *GeneratorArgs { + return &GeneratorArgs{ + OutputBase: DefaultSourceTree(), + GoHeaderFilePath: filepath.Join(DefaultSourceTree(), "k8s.io/gengo/boilerplate/boilerplate.go.txt"), + GeneratedBuildTag: "ignore_autogenerated", + GeneratedByCommentTemplate: "// Code generated by GENERATOR_NAME. DO NOT EDIT.", + defaultCommandLineFlags: true, + } +} + +// GeneratorArgs has arguments that are passed to generators. +type GeneratorArgs struct { + // Which directories to parse. + InputDirs []string + + // Source tree to write results to. + OutputBase string + + // Package path within the source tree. + OutputPackagePath string + + // Output file name. + OutputFileBaseName string + + // Where to get copyright header text. + GoHeaderFilePath string + + // If GeneratedByCommentTemplate is set, generate a "Code generated by" comment + // below the bloilerplate, of the format defined by this string. + // Any instances of "GENERATOR_NAME" will be replaced with the name of the code generator. + GeneratedByCommentTemplate string + + // If true, only verify, don't write anything. + VerifyOnly bool + + // If true, include *_test.go files + IncludeTestFiles bool + + // GeneratedBuildTag is the tag used to identify code generated by execution + // of this type. Each generator should use a different tag, and different + // groups of generators (external API that depends on Kube generations) should + // keep tags distinct as well. + GeneratedBuildTag string + + // Any custom arguments go here + CustomArgs interface{} + + // Whether to use default command line flags + defaultCommandLineFlags bool +} + +// WithoutDefaultFlagParsing disables implicit addition of command line flags and parsing. +func (g *GeneratorArgs) WithoutDefaultFlagParsing() *GeneratorArgs { + g.defaultCommandLineFlags = false + return g +} + +func (g *GeneratorArgs) AddFlags(fs *pflag.FlagSet) { + fs.StringSliceVarP(&g.InputDirs, "input-dirs", "i", g.InputDirs, "Comma-separated list of import paths to get input types from.") + fs.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/ or ./ if $GOPATH is not set.") + fs.StringVarP(&g.OutputPackagePath, "output-package", "p", g.OutputPackagePath, "Base package path.") + fs.StringVarP(&g.OutputFileBaseName, "output-file-base", "O", g.OutputFileBaseName, "Base name (without .go suffix) for output files.") + fs.StringVarP(&g.GoHeaderFilePath, "go-header-file", "h", g.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.") + fs.BoolVar(&g.VerifyOnly, "verify-only", g.VerifyOnly, "If true, only verify existing output, do not write anything.") + fs.StringVar(&g.GeneratedBuildTag, "build-tag", g.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.") +} + +// LoadGoBoilerplate loads the boilerplate file passed to --go-header-file. 
+func (g *GeneratorArgs) LoadGoBoilerplate() ([]byte, error) { + b, err := ioutil.ReadFile(g.GoHeaderFilePath) + if err != nil { + return nil, err + } + b = bytes.Replace(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().Year())), -1) + + if g.GeneratedByCommentTemplate != "" { + if len(b) != 0 { + b = append(b, byte('\n')) + } + generatorName := path.Base(os.Args[0]) + generatedByComment := strings.Replace(g.GeneratedByCommentTemplate, "GENERATOR_NAME", generatorName, -1) + s := fmt.Sprintf("%s\n\n", generatedByComment) + b = append(b, []byte(s)...) + } + return b, nil +} + +// NewBuilder makes a new parser.Builder and populates it with the input +// directories. +func (g *GeneratorArgs) NewBuilder() (*parser.Builder, error) { + b := parser.New() + + // flag for including *_test.go + b.IncludeTestFiles = g.IncludeTestFiles + + // Ignore all auto-generated files. + b.AddBuildTags(g.GeneratedBuildTag) + + for _, d := range g.InputDirs { + var err error + if strings.HasSuffix(d, "/...") { + err = b.AddDirRecursive(strings.TrimSuffix(d, "/...")) + } else { + err = b.AddDir(d) + } + if err != nil { + return nil, fmt.Errorf("unable to add directory %q: %v", d, err) + } + } + return b, nil +} + +// InputIncludes returns true if the given package is a (sub) package of one of +// the InputDirs. +func (g *GeneratorArgs) InputIncludes(p *types.Package) bool { + for _, dir := range g.InputDirs { + d := dir + if strings.HasSuffix(d, "...") { + d = strings.TrimSuffix(d, "...") + } + if strings.HasPrefix(p.Path, d) { + return true + } + } + return false +} + +// DefaultSourceTree returns the /src directory of the first entry in $GOPATH. +// If $GOPATH is empty, it returns "./". Useful as a default output location. +func DefaultSourceTree() string { + paths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator)) + if len(paths) > 0 && len(paths[0]) > 0 { + return filepath.Join(paths[0], "src") + } + return "./" +} + +// Execute implements main(). +// If you don't need any non-default behavior, use as: +// args.Default().Execute(...) +func (g *GeneratorArgs) Execute(nameSystems namer.NameSystems, defaultSystem string, pkgs func(*generator.Context, *GeneratorArgs) generator.Packages) error { + if g.defaultCommandLineFlags { + g.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) + pflag.Parse() + } + + b, err := g.NewBuilder() + if err != nil { + return fmt.Errorf("Failed making a parser: %v", err) + } + + // pass through the flag on whether to include *_test.go files + b.IncludeTestFiles = g.IncludeTestFiles + + c, err := generator.NewContext(b, nameSystems, defaultSystem) + if err != nil { + return fmt.Errorf("Failed making a context: %v", err) + } + + c.Verify = g.VerifyOnly + packages := pkgs(c, g) + if err := c.ExecutePackages(g.OutputBase, packages); err != nil { + return fmt.Errorf("Failed executing generator: %v", err) + } + + return nil +} diff --git a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go new file mode 100644 index 00000000..40f1306d --- /dev/null +++ b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go @@ -0,0 +1,924 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "io" + "path/filepath" + "sort" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/examples/set-gen/sets" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// CustomArgs is used tby the go2idl framework to pass args specific to this +// generator. +type CustomArgs struct { + BoundingDirs []string // Only deal with types rooted under these dirs. +} + +// This is the comment tag that carries parameters for deep-copy generation. +const ( + tagEnabledName = "k8s:deepcopy-gen" + interfacesTagName = tagEnabledName + ":interfaces" + interfacesNonPointerTagName = tagEnabledName + ":nonpointer-interfaces" // attach the DeepCopy methods to the +) + +// Known values for the comment tag. +const tagValuePackage = "package" + +// enabledTagValue holds parameters from a tagName tag. +type enabledTagValue struct { + value string + register bool +} + +func extractEnabledTypeTag(t *types.Type) *enabledTagValue { + comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) + return extractEnabledTag(comments) +} + +func extractEnabledTag(comments []string) *enabledTagValue { + tagVals := types.ExtractCommentTags("+", comments)[tagEnabledName] + if tagVals == nil { + // No match for the tag. + return nil + } + // If there are multiple values, abort. + if len(tagVals) > 1 { + klog.Fatalf("Found %d %s tags: %q", len(tagVals), tagEnabledName, tagVals) + } + + // If we got here we are returning something. + tag := &enabledTagValue{} + + // Get the primary value. + parts := strings.Split(tagVals[0], ",") + if len(parts) >= 1 { + tag.value = parts[0] + } + + // Parse extra arguments. + parts = parts[1:] + for i := range parts { + kv := strings.SplitN(parts[i], "=", 2) + k := kv[0] + v := "" + if len(kv) == 2 { + v = kv[1] + } + switch k { + case "register": + if v != "false" { + tag.register = true + } + default: + klog.Fatalf("Unsupported %s param: %q", tagEnabledName, parts[i]) + } + } + return tag +} + +// TODO: This is created only to reduce number of changes in a single PR. +// Remove it and use PublicNamer instead. +func deepCopyNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Join: func(pre string, in []string, post string) string { + return strings.Join(in, "_") + }, + PrependPackageNames: 1, + } +} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": deepCopyNamer(), + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + inputs := sets.NewString(context.Inputs...) 
+ packages := generator.Packages{} + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + + boundingDirs := []string{} + if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok { + if customArgs.BoundingDirs == nil { + customArgs.BoundingDirs = context.Inputs + } + for i := range customArgs.BoundingDirs { + // Strip any trailing slashes - they are not exactly "correct" but + // this is friendlier. + boundingDirs = append(boundingDirs, strings.TrimRight(customArgs.BoundingDirs[i], "/")) + } + } + + for i := range inputs { + klog.V(5).Infof("Considering pkg %q", i) + pkg := context.Universe[i] + if pkg == nil { + // If the input had no Go files, for example. + continue + } + + ptag := extractEnabledTag(pkg.Comments) + ptagValue := "" + ptagRegister := false + if ptag != nil { + ptagValue = ptag.value + if ptagValue != tagValuePackage { + klog.Fatalf("Package %v: unsupported %s value: %q", i, tagEnabledName, ptagValue) + } + ptagRegister = ptag.register + klog.V(5).Infof(" tag.value: %q, tag.register: %t", ptagValue, ptagRegister) + } else { + klog.V(5).Infof(" no tag") + } + + // If the pkg-scoped tag says to generate, we can skip scanning types. + pkgNeedsGeneration := (ptagValue == tagValuePackage) + if !pkgNeedsGeneration { + // If the pkg-scoped tag did not exist, scan all types for one that + // explicitly wants generation. + for _, t := range pkg.Types { + klog.V(5).Infof(" considering type %q", t.Name.String()) + ttag := extractEnabledTypeTag(t) + if ttag != nil && ttag.value == "true" { + klog.V(5).Infof(" tag=true") + if !copyableType(t) { + klog.Fatalf("Type %v requests deepcopy generation but is not copyable", t) + } + pkgNeedsGeneration = true + break + } + } + } + + if pkgNeedsGeneration { + klog.V(3).Infof("Package %q needs generation", i) + path := pkg.Path + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. + if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { + expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) + if strings.Contains(expandedPath, "/vendor/") { + path = expandedPath + } + } + packages = append(packages, + &generator.DefaultPackage{ + PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0], + PackagePath: path, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenDeepCopy(arguments.OutputFileBaseName, pkg.Path, boundingDirs, (ptagValue == tagValuePackage), ptagRegister), + } + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return t.Name.Package == pkg.Path + }, + }) + } + } + return packages +} + +// genDeepCopy produces a file with autogenerated deep-copy functions. 
+type genDeepCopy struct { + generator.DefaultGen + targetPackage string + boundingDirs []string + allTypes bool + registerTypes bool + imports namer.ImportTracker + typesForInit []*types.Type +} + +func NewGenDeepCopy(sanitizedName, targetPackage string, boundingDirs []string, allTypes, registerTypes bool) generator.Generator { + return &genDeepCopy{ + DefaultGen: generator.DefaultGen{ + OptionalName: sanitizedName, + }, + targetPackage: targetPackage, + boundingDirs: boundingDirs, + allTypes: allTypes, + registerTypes: registerTypes, + imports: generator.NewImportTracker(), + typesForInit: make([]*types.Type, 0), + } +} + +func (g *genDeepCopy) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.targetPackage, g.imports), + } +} + +func (g *genDeepCopy) Filter(c *generator.Context, t *types.Type) bool { + // Filter out types not being processed or not copyable within the package. + enabled := g.allTypes + if !enabled { + ttag := extractEnabledTypeTag(t) + if ttag != nil && ttag.value == "true" { + enabled = true + } + } + if !enabled { + return false + } + if !copyableType(t) { + klog.V(2).Infof("Type %v is not copyable", t) + return false + } + klog.V(4).Infof("Type %v is copyable", t) + g.typesForInit = append(g.typesForInit, t) + return true +} + +func (g *genDeepCopy) copyableAndInBounds(t *types.Type) bool { + if !copyableType(t) { + return false + } + // Only packages within the restricted range can be processed. + if !isRootedUnder(t.Name.Package, g.boundingDirs) { + return false + } + return true +} + +// deepCopyMethod returns the signature of a DeepCopy() method, nil or an error +// if the type does not match. This allows more efficient deep copy +// implementations to be defined by the type's author. The correct signature +// for a type T is: +// func (t T) DeepCopy() T +// or: +// func (t *T) DeepCopy() *T +func deepCopyMethod(t *types.Type) (*types.Signature, error) { + f, found := t.Methods["DeepCopy"] + if !found { + return nil, nil + } + if len(f.Signature.Parameters) != 0 { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no parameters", t) + } + if len(f.Signature.Results) != 1 { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one result", t) + } + + ptrResult := f.Signature.Results[0].Kind == types.Pointer && f.Signature.Results[0].Elem.Name == t.Name + nonPtrResult := f.Signature.Results[0].Name == t.Name + + if !ptrResult && !nonPtrResult { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected to return %s or *%s", t, t.Name.Name, t.Name.Name) + } + + ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name + nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name + + if ptrRcvr && !ptrResult { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a *%s result for a *%s receiver", t, t.Name.Name, t.Name.Name) + } + if nonPtrRcvr && !nonPtrResult { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a %s result for a %s receiver", t, t.Name.Name, t.Name.Name) + } + + return f.Signature, nil +} + +// deepCopyMethodOrDie returns the signatrue of a DeepCopy method, nil or calls klog.Fatalf +// if the type does not match. 
+func deepCopyMethodOrDie(t *types.Type) *types.Signature { + ret, err := deepCopyMethod(t) + if err != nil { + klog.Fatal(err) + } + return ret +} + +// deepCopyIntoMethod returns the signature of a DeepCopyInto() method, nil or an error +// if the type is wrong. DeepCopyInto allows more efficient deep copy +// implementations to be defined by the type's author. The correct signature +// for a type T is: +// func (t T) DeepCopyInto(t *T) +// or: +// func (t *T) DeepCopyInto(t *T) +func deepCopyIntoMethod(t *types.Type) (*types.Signature, error) { + f, found := t.Methods["DeepCopyInto"] + if !found { + return nil, nil + } + if len(f.Signature.Parameters) != 1 { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one parameter", t) + } + if len(f.Signature.Results) != 0 { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no result type", t) + } + + ptrParam := f.Signature.Parameters[0].Kind == types.Pointer && f.Signature.Parameters[0].Elem.Name == t.Name + + if !ptrParam { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected parameter of type *%s", t, t.Name.Name) + } + + ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name + nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name + + if !ptrRcvr && !nonPtrRcvr { + // this should never happen + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a receiver of type %s or *%s", t, t.Name.Name, t.Name.Name) + } + + return f.Signature, nil +} + +// deepCopyIntoMethodOrDie returns the signature of a DeepCopyInto() method, nil or calls klog.Fatalf +// if the type is wrong. +func deepCopyIntoMethodOrDie(t *types.Type) *types.Signature { + ret, err := deepCopyIntoMethod(t) + if err != nil { + klog.Fatal(err) + } + return ret +} + +func isRootedUnder(pkg string, roots []string) bool { + // Add trailing / to avoid false matches, e.g. foo/bar vs foo/barn. This + // assumes that bounding dirs do not have trailing slashes. + pkg = pkg + "/" + for _, root := range roots { + if strings.HasPrefix(pkg, root+"/") { + return true + } + } + return false +} + +func copyableType(t *types.Type) bool { + // If the type opts out of copy-generation, stop. + ttag := extractEnabledTypeTag(t) + if ttag != nil && ttag.value == "false" { + return false + } + + // Filter out private types. + if namer.IsPrivateGoName(t.Name.Name) { + return false + } + + if t.Kind == types.Alias { + // if the underlying built-in is not deepcopy-able, deepcopy is opt-in through definition of custom methods. + // Note that aliases of builtins, maps, slices can have deepcopy methods. 
+ if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + return true + } else { + return t.Underlying.Kind != types.Builtin || copyableType(t.Underlying) + } + } + + if t.Kind != types.Struct { + return false + } + + return true +} + +func underlyingType(t *types.Type) *types.Type { + for t.Kind == types.Alias { + t = t.Underlying + } + return t +} + +func (g *genDeepCopy) isOtherPackage(pkg string) bool { + if pkg == g.targetPackage { + return false + } + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { + return false + } + return true +} + +func (g *genDeepCopy) Imports(c *generator.Context) (imports []string) { + importLines := []string{} + for _, singleImport := range g.imports.ImportLines() { + if g.isOtherPackage(singleImport) { + importLines = append(importLines, singleImport) + } + } + return importLines +} + +func argsFromType(ts ...*types.Type) generator.Args { + a := generator.Args{ + "type": ts[0], + } + for i, t := range ts { + a[fmt.Sprintf("type%d", i+1)] = t + } + return a +} + +func (g *genDeepCopy) Init(c *generator.Context, w io.Writer) error { + return nil +} + +func (g *genDeepCopy) needsGeneration(t *types.Type) bool { + tag := extractEnabledTypeTag(t) + tv := "" + if tag != nil { + tv = tag.value + if tv != "true" && tv != "false" { + klog.Fatalf("Type %v: unsupported %s value: %q", t, tagEnabledName, tag.value) + } + } + if g.allTypes && tv == "false" { + // The whole package is being generated, but this type has opted out. + klog.V(5).Infof("Not generating for type %v because type opted out", t) + return false + } + if !g.allTypes && tv != "true" { + // The whole package is NOT being generated, and this type has NOT opted in. + klog.V(5).Infof("Not generating for type %v because type did not opt in", t) + return false + } + return true +} + +func extractInterfacesTag(t *types.Type) []string { + var result []string + comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) + values := types.ExtractCommentTags("+", comments)[interfacesTagName] + for _, v := range values { + if len(v) == 0 { + continue + } + intfs := strings.Split(v, ",") + for _, intf := range intfs { + if intf == "" { + continue + } + result = append(result, intf) + } + } + return result +} + +func extractNonPointerInterfaces(t *types.Type) (bool, error) { + comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) 
+ values := types.ExtractCommentTags("+", comments)[interfacesNonPointerTagName] + if len(values) == 0 { + return false, nil + } + result := values[0] == "true" + for _, v := range values { + if v == "true" != result { + return false, fmt.Errorf("contradicting %v value %q found to previous value %v", interfacesNonPointerTagName, v, result) + } + } + return result, nil +} + +func (g *genDeepCopy) deepCopyableInterfacesInner(c *generator.Context, t *types.Type) ([]*types.Type, error) { + if t.Kind != types.Struct { + return nil, nil + } + + intfs := extractInterfacesTag(t) + + var ts []*types.Type + for _, intf := range intfs { + t := types.ParseFullyQualifiedName(intf) + c.AddDir(t.Package) + intfT := c.Universe.Type(t) + if intfT == nil { + return nil, fmt.Errorf("unknown type %q in %s tag of type %s", intf, interfacesTagName, intfT) + } + if intfT.Kind != types.Interface { + return nil, fmt.Errorf("type %q in %s tag of type %s is not an interface, but: %q", intf, interfacesTagName, t, intfT.Kind) + } + g.imports.AddType(intfT) + ts = append(ts, intfT) + } + + return ts, nil +} + +// deepCopyableInterfaces returns the interface types to implement and whether they apply to a non-pointer receiver. +func (g *genDeepCopy) deepCopyableInterfaces(c *generator.Context, t *types.Type) ([]*types.Type, bool, error) { + ts, err := g.deepCopyableInterfacesInner(c, t) + if err != nil { + return nil, false, err + } + + set := map[string]*types.Type{} + for _, t := range ts { + set[t.String()] = t + } + + result := []*types.Type{} + for _, t := range set { + result = append(result, t) + } + + TypeSlice(result).Sort() // we need a stable sorting because it determines the order in generation + + nonPointerReceiver, err := extractNonPointerInterfaces(t) + if err != nil { + return nil, false, err + } + + return result, nonPointerReceiver, nil +} + +type TypeSlice []*types.Type + +func (s TypeSlice) Len() int { return len(s) } +func (s TypeSlice) Less(i, j int) bool { return s[i].String() < s[j].String() } +func (s TypeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s TypeSlice) Sort() { sort.Sort(s) } + +func (g *genDeepCopy) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + if !g.needsGeneration(t) { + return nil + } + klog.V(5).Infof("Generating deepcopy function for type %v", t) + + sw := generator.NewSnippetWriter(w, c, "$", "$") + args := argsFromType(t) + + if deepCopyIntoMethodOrDie(t) == nil { + sw.Do("// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil.\n", args) + if isReference(t) { + sw.Do("func (in $.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args) + sw.Do("{in:=&in\n", nil) + } else { + sw.Do("func (in *$.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args) + } + if deepCopyMethodOrDie(t) != nil { + if t.Methods["DeepCopy"].Signature.Receiver.Kind == types.Pointer { + sw.Do("clone := in.DeepCopy()\n", nil) + sw.Do("*out = *clone\n", nil) + } else { + sw.Do("*out = in.DeepCopy()\n", nil) + } + sw.Do("return\n", nil) + } else { + g.generateFor(t, sw) + sw.Do("return\n", nil) + } + if isReference(t) { + sw.Do("}\n", nil) + } + sw.Do("}\n\n", nil) + } + + if deepCopyMethodOrDie(t) == nil { + sw.Do("// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new $.type|raw$.\n", args) + if isReference(t) { + sw.Do("func (in $.type|raw$) DeepCopy() $.type|raw$ {\n", args) + } else { + sw.Do("func (in *$.type|raw$) DeepCopy() *$.type|raw$ {\n", args) + } + sw.Do("if in == nil { return nil }\n", nil) + sw.Do("out := new($.type|raw$)\n", args) + sw.Do("in.DeepCopyInto(out)\n", nil) + if isReference(t) { + sw.Do("return *out\n", nil) + } else { + sw.Do("return out\n", nil) + } + sw.Do("}\n\n", nil) + } + + intfs, nonPointerReceiver, err := g.deepCopyableInterfaces(c, t) + if err != nil { + return err + } + for _, intf := range intfs { + sw.Do(fmt.Sprintf("// DeepCopy%s is an autogenerated deepcopy function, copying the receiver, creating a new $.type2|raw$.\n", intf.Name.Name), argsFromType(t, intf)) + if nonPointerReceiver { + sw.Do(fmt.Sprintf("func (in $.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf)) + sw.Do("return *in.DeepCopy()", nil) + sw.Do("}\n\n", nil) + } else { + sw.Do(fmt.Sprintf("func (in *$.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf)) + sw.Do("if c := in.DeepCopy(); c != nil {\n", nil) + sw.Do("return c\n", nil) + sw.Do("}\n", nil) + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + } + } + + return sw.Error() +} + +// isReference return true for pointer, maps, slices and aliases of those. +func isReference(t *types.Type) bool { + if t.Kind == types.Pointer || t.Kind == types.Map || t.Kind == types.Slice { + return true + } + return t.Kind == types.Alias && isReference(underlyingType(t)) +} + +// we use the system of shadowing 'in' and 'out' so that the same code is valid +// at any nesting level. This makes the autogenerator easy to understand, and +// the compiler shouldn't care. +func (g *genDeepCopy) generateFor(t *types.Type, sw *generator.SnippetWriter) { + // derive inner types if t is an alias. We call the do* methods below with the alias type. + // basic rule: generate according to inner type, but construct objects with the alias type. + ut := underlyingType(t) + + var f func(*types.Type, *generator.SnippetWriter) + switch ut.Kind { + case types.Builtin: + f = g.doBuiltin + case types.Map: + f = g.doMap + case types.Slice: + f = g.doSlice + case types.Struct: + f = g.doStruct + case types.Pointer: + f = g.doPointer + case types.Interface: + // interfaces are handled in-line in the other cases + klog.Fatalf("Hit an interface type %v. This should never happen.", t) + case types.Alias: + // can never happen because we branch on the underlying type which is never an alias + klog.Fatalf("Hit an alias type %v. This should never happen.", t) + default: + klog.Fatalf("Hit an unsupported type %v.", t) + } + f(t, sw) +} + +// doBuiltin generates code for a builtin or an alias to a builtin. 
The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doBuiltin(t *types.Type, sw *generator.SnippetWriter) { + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + sw.Do("*out = *in\n", nil) +} + +// doMap generates code for a map or an alias to a map. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + uet := underlyingType(ut.Elem) + + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + if !ut.Key.IsAssignable() { + klog.Fatalf("Hit an unsupported type %v for: %v", uet, t) + } + + sw.Do("*out = make($.|raw$, len(*in))\n", t) + sw.Do("for key, val := range *in {\n", nil) + dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem) + switch { + case dc != nil || dci != nil: + // Note: a DeepCopy exists because it is added if DeepCopyInto is manually defined + leftPointer := ut.Elem.Kind == types.Pointer + rightPointer := !isReference(ut.Elem) + if dc != nil { + rightPointer = dc.Results[0].Kind == types.Pointer + } + if leftPointer == rightPointer { + sw.Do("(*out)[key] = val.DeepCopy()\n", nil) + } else if leftPointer { + sw.Do("x := val.DeepCopy()\n", nil) + sw.Do("(*out)[key] = &x\n", nil) + } else { + sw.Do("(*out)[key] = *val.DeepCopy()\n", nil) + } + case ut.Elem.IsAnonymousStruct(): // not uet here because it needs type cast + sw.Do("(*out)[key] = val\n", nil) + case uet.IsAssignable(): + sw.Do("(*out)[key] = val\n", nil) + case uet.Kind == types.Interface: + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uet.Name.Name == "interface{}" { + klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) + } + sw.Do("if val == nil {(*out)[key]=nil} else {\n", nil) + // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it + // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang + // parser does not give us the underlying interface name. So we cannot do any better. + sw.Do(fmt.Sprintf("(*out)[key] = val.DeepCopy%s()\n", uet.Name.Name), nil) + sw.Do("}\n", nil) + case uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer: + sw.Do("var outVal $.|raw$\n", uet) + sw.Do("if val == nil { (*out)[key] = nil } else {\n", nil) + sw.Do("in, out := &val, &outVal\n", uet) + g.generateFor(ut.Elem, sw) + sw.Do("}\n", nil) + sw.Do("(*out)[key] = outVal\n", nil) + case uet.Kind == types.Struct: + sw.Do("(*out)[key] = *val.DeepCopy()\n", uet) + default: + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + } + sw.Do("}\n", nil) +} + +// doSlice generates code for a slice or an alias to a slice. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. 
+func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + uet := underlyingType(ut.Elem) + + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + sw.Do("*out = make($.|raw$, len(*in))\n", t) + if deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil { + sw.Do("for i := range *in {\n", nil) + // Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined + sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil) + sw.Do("}\n", nil) + } else if uet.Kind == types.Builtin || uet.IsAssignable() { + sw.Do("copy(*out, *in)\n", nil) + } else { + sw.Do("for i := range *in {\n", nil) + if uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer || deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil { + sw.Do("if (*in)[i] != nil {\n", nil) + sw.Do("in, out := &(*in)[i], &(*out)[i]\n", nil) + g.generateFor(ut.Elem, sw) + sw.Do("}\n", nil) + } else if uet.Kind == types.Interface { + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uet.Name.Name == "interface{}" { + klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) + } + sw.Do("if (*in)[i] != nil {\n", nil) + // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it + // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang + // parser does not give us the underlying interface name. So we cannot do any better. + sw.Do(fmt.Sprintf("(*out)[i] = (*in)[i].DeepCopy%s()\n", uet.Name.Name), nil) + sw.Do("}\n", nil) + } else if uet.Kind == types.Struct { + sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil) + } else { + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + } + sw.Do("}\n", nil) + } +} + +// doStruct generates code for a struct or an alias to a struct. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + // Simple copy covers a lot of cases. + sw.Do("*out = *in\n", nil) + + // Now fix-up fields as needed. + for _, m := range ut.Members { + ft := m.Type + uft := underlyingType(ft) + + args := generator.Args{ + "type": ft, + "kind": ft.Kind, + "name": m.Name, + } + dc, dci := deepCopyMethodOrDie(ft), deepCopyIntoMethodOrDie(ft) + switch { + case dc != nil || dci != nil: + // Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined + leftPointer := ft.Kind == types.Pointer + rightPointer := !isReference(ft) + if dc != nil { + rightPointer = dc.Results[0].Kind == types.Pointer + } + if leftPointer == rightPointer { + sw.Do("out.$.name$ = in.$.name$.DeepCopy()\n", args) + } else if leftPointer { + sw.Do("x := in.$.name$.DeepCopy()\n", args) + sw.Do("out.$.name$ = = &x\n", args) + } else { + sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args) + } + case uft.Kind == types.Builtin: + // the initial *out = *in was enough + case uft.Kind == types.Map, uft.Kind == types.Slice, uft.Kind == types.Pointer: + // Fixup non-nil reference-semantic types. 
+ sw.Do("if in.$.name$ != nil {\n", args) + sw.Do("in, out := &in.$.name$, &out.$.name$\n", args) + g.generateFor(ft, sw) + sw.Do("}\n", nil) + case uft.Kind == types.Struct: + if ft.IsAssignable() { + sw.Do("out.$.name$ = in.$.name$\n", args) + } else { + sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args) + } + case uft.Kind == types.Interface: + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uft.Name.Name == "interface{}" { + klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uft.Name.Name) + } + sw.Do("if in.$.name$ != nil {\n", args) + // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it + // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang + // parser does not give us the underlying interface name. So we cannot do any better. + sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args) + sw.Do("}\n", nil) + default: + klog.Fatalf("Hit an unsupported type %v for %v, from %v", uft, ft, t) + } + } +} + +// doPointer generates code for a pointer or an alias to a pointer. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + uet := underlyingType(ut.Elem) + + dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem) + switch { + case dc != nil || dci != nil: + rightPointer := !isReference(ut.Elem) + if dc != nil { + rightPointer = dc.Results[0].Kind == types.Pointer + } + if rightPointer { + sw.Do("*out = (*in).DeepCopy()\n", nil) + } else { + sw.Do("x := (*in).DeepCopy()\n", nil) + sw.Do("*out = &x\n", nil) + } + case uet.IsAssignable(): + sw.Do("*out = new($.Elem|raw$)\n", ut) + sw.Do("**out = **in", nil) + case uet.Kind == types.Map, uet.Kind == types.Slice, uet.Kind == types.Pointer: + sw.Do("*out = new($.Elem|raw$)\n", ut) + sw.Do("if **in != nil {\n", nil) + sw.Do("in, out := *in, *out\n", nil) + g.generateFor(uet, sw) + sw.Do("}\n", nil) + case uet.Kind == types.Struct: + sw.Do("*out = new($.Elem|raw$)\n", ut) + sw.Do("(*in).DeepCopyInto(*out)\n", nil) + default: + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + } +} diff --git a/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go b/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go new file mode 100644 index 00000000..7e3bc6b4 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go @@ -0,0 +1,828 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "reflect" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// CustomArgs is used tby the go2idl framework to pass args specific to this +// generator. 
+type CustomArgs struct { + ExtraPeerDirs []string // Always consider these as last-ditch possibilities for conversions. +} + +// These are the comment tags that carry parameters for defaulter generation. +const tagName = "k8s:defaulter-gen" +const intputTagName = "k8s:defaulter-gen-input" + +func extractTag(comments []string) []string { + return types.ExtractCommentTags("+", comments)[tagName] +} + +func extractInputTag(comments []string) []string { + return types.ExtractCommentTags("+", comments)[intputTagName] +} + +func checkTag(comments []string, require ...string) bool { + values := types.ExtractCommentTags("+", comments)[tagName] + if len(require) == 0 { + return len(values) == 1 && values[0] == "" + } + return reflect.DeepEqual(values, require) +} + +func defaultFnNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Prefix: "SetDefaults_", + Join: func(pre string, in []string, post string) string { + return pre + strings.Join(in, "_") + post + }, + } +} + +func objectDefaultFnNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Prefix: "SetObjectDefaults_", + Join: func(pre string, in []string, post string) string { + return pre + strings.Join(in, "_") + post + }, + } +} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(1), + "raw": namer.NewRawNamer("", nil), + "defaultfn": defaultFnNamer(), + "objectdefaultfn": objectDefaultFnNamer(), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// defaults holds the declared defaulting functions for a given type (all defaulting functions +// are expected to be func(1)) +type defaults struct { + // object is the defaulter function for a top level type (typically one with TypeMeta) that + // invokes all child defaulters. May be nil if the object defaulter has not yet been generated. + object *types.Type + // base is a defaulter function defined for a type SetDefaults_Pod which does not invoke all + // child defaults - the base defaulter alone is insufficient to default a type + base *types.Type + // additional is zero or more defaulter functions of the form SetDefaults_Pod_XXXX that can be + // included in the Object defaulter. + additional []*types.Type +} + +// All of the types in conversions map are of type "DeclarationOf" with +// the underlying type being "Func". +type defaulterFuncMap map[*types.Type]defaults + +// Returns all manually-defined defaulting functions in the package. +func getManualDefaultingFunctions(context *generator.Context, pkg *types.Package, manualMap defaulterFuncMap) { + buffer := &bytes.Buffer{} + sw := generator.NewSnippetWriter(buffer, context, "$", "$") + + for _, f := range pkg.Functions { + if f.Underlying == nil || f.Underlying.Kind != types.Func { + klog.Errorf("Malformed function: %#v", f) + continue + } + if f.Underlying.Signature == nil { + klog.Errorf("Function without signature: %#v", f) + continue + } + signature := f.Underlying.Signature + // Check whether the function is defaulting function. 
+ // Note that all of them have signature: + // object: func SetObjectDefaults_inType(*inType) + // base: func SetDefaults_inType(*inType) + // additional: func SetDefaults_inType_Qualifier(*inType) + if signature.Receiver != nil { + continue + } + if len(signature.Parameters) != 1 { + continue + } + if len(signature.Results) != 0 { + continue + } + inType := signature.Parameters[0] + if inType.Kind != types.Pointer { + continue + } + // Check if this is the primary defaulter. + args := defaultingArgsFromType(inType.Elem) + sw.Do("$.inType|defaultfn$", args) + switch { + case f.Name.Name == buffer.String(): + key := inType.Elem + // We might scan the same package twice, and that's OK. + v, ok := manualMap[key] + if ok && v.base != nil && v.base.Name.Package != pkg.Path { + panic(fmt.Sprintf("duplicate static defaulter defined: %#v", key)) + } + v.base = f + manualMap[key] = v + klog.V(6).Infof("found base defaulter function for %s from %s", key.Name, f.Name) + // Is one of the additional defaulters - a top level defaulter on a type that is + // also invoked. + case strings.HasPrefix(f.Name.Name, buffer.String()+"_"): + key := inType.Elem + v, ok := manualMap[key] + if ok { + exists := false + for _, existing := range v.additional { + if existing.Name == f.Name { + exists = true + break + } + } + if exists { + continue + } + } + v.additional = append(v.additional, f) + manualMap[key] = v + klog.V(6).Infof("found additional defaulter function for %s from %s", key.Name, f.Name) + } + buffer.Reset() + sw.Do("$.inType|objectdefaultfn$", args) + if f.Name.Name == buffer.String() { + key := inType.Elem + // We might scan the same package twice, and that's OK. + v, ok := manualMap[key] + if ok && v.base != nil && v.base.Name.Package != pkg.Path { + panic(fmt.Sprintf("duplicate static defaulter defined: %#v", key)) + } + v.object = f + manualMap[key] = v + klog.V(6).Infof("found object defaulter function for %s from %s", key.Name, f.Name) + } + buffer.Reset() + } +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + packages := generator.Packages{} + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + + // Accumulate pre-existing default functions. + // TODO: This is too ad-hoc. We need a better way. + existingDefaulters := defaulterFuncMap{} + + buffer := &bytes.Buffer{} + sw := generator.NewSnippetWriter(buffer, context, "$", "$") + + // We are generating defaults only for packages that are explicitly + // passed as InputDir. + for _, i := range context.Inputs { + klog.V(5).Infof("considering pkg %q", i) + pkg := context.Universe[i] + if pkg == nil { + // If the input had no Go files, for example. + continue + } + // typesPkg is where the types that needs defaulter are defined. + // Sometimes it is different from pkg. For example, kubernetes core/v1 + // types are defined in vendor/k8s.io/api/core/v1, while pkg is at + // pkg/api/v1. + typesPkg := pkg + + // Add defaulting functions. 
+ getManualDefaultingFunctions(context, pkg, existingDefaulters) + + var peerPkgs []string + if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok { + for _, pkg := range customArgs.ExtraPeerDirs { + if i := strings.Index(pkg, "/vendor/"); i != -1 { + pkg = pkg[i+len("/vendor/"):] + } + peerPkgs = append(peerPkgs, pkg) + } + } + // Make sure our peer-packages are added and fully parsed. + for _, pp := range peerPkgs { + context.AddDir(pp) + getManualDefaultingFunctions(context, context.Universe[pp], existingDefaulters) + } + + typesWith := extractTag(pkg.Comments) + shouldCreateObjectDefaulterFn := func(t *types.Type) bool { + if defaults, ok := existingDefaulters[t]; ok && defaults.object != nil { + // A default generator is defined + klog.V(5).Infof(" an object defaulter already exists as %s", defaults.base.Name) + return false + } + // opt-out + if checkTag(t.SecondClosestCommentLines, "false") { + return false + } + // opt-in + if checkTag(t.SecondClosestCommentLines, "true") { + return true + } + // For every k8s:defaulter-gen tag at the package level, interpret the value as a + // field name (like TypeMeta, ListMeta, ObjectMeta) and trigger defaulter generation + // for any type with any of the matching field names. Provides a more useful package + // level defaulting than global (because we only need defaulters on a subset of objects - + // usually those with TypeMeta). + if t.Kind == types.Struct && len(typesWith) > 0 { + for _, field := range t.Members { + for _, s := range typesWith { + if field.Name == s { + return true + } + } + } + } + return false + } + + // if the types are not in the same package where the defaulter functions to be generated + inputTags := extractInputTag(pkg.Comments) + if len(inputTags) > 1 { + panic(fmt.Sprintf("there could only be one input tag, got %#v", inputTags)) + } + if len(inputTags) == 1 { + var err error + typesPkg, err = context.AddDirectory(filepath.Join(pkg.Path, inputTags[0])) + if err != nil { + klog.Fatalf("cannot import package %s", inputTags[0]) + } + // update context.Order to the latest context.Universe + orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} + context.Order = orderer.OrderUniverse(context.Universe) + } + + newDefaulters := defaulterFuncMap{} + for _, t := range typesPkg.Types { + if !shouldCreateObjectDefaulterFn(t) { + continue + } + if namer.IsPrivateGoName(t.Name.Name) { + // We won't be able to convert to a private type. 
+ klog.V(5).Infof(" found a type %v, but it is a private name", t) + continue + } + + // create a synthetic type we can use during generation + newDefaulters[t] = defaults{} + } + + // only generate defaulters for objects that actually have defined defaulters + // prevents empty defaulters from being registered + for { + promoted := 0 + for t, d := range newDefaulters { + if d.object != nil { + continue + } + if newCallTreeForType(existingDefaulters, newDefaulters).build(t, true) != nil { + args := defaultingArgsFromType(t) + sw.Do("$.inType|objectdefaultfn$", args) + newDefaulters[t] = defaults{ + object: &types.Type{ + Name: types.Name{ + Package: pkg.Path, + Name: buffer.String(), + }, + Kind: types.Func, + }, + } + buffer.Reset() + promoted++ + } + } + if promoted != 0 { + continue + } + + // prune any types that were not used + for t, d := range newDefaulters { + if d.object == nil { + klog.V(6).Infof("did not generate defaulter for %s because no child defaulters were registered", t.Name) + delete(newDefaulters, t) + } + } + break + } + + if len(newDefaulters) == 0 { + klog.V(5).Infof("no defaulters in package %s", pkg.Name) + } + + path := pkg.Path + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. + if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { + expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) + if strings.Contains(expandedPath, "/vendor/") { + path = expandedPath + } + } + + packages = append(packages, + &generator.DefaultPackage{ + PackageName: filepath.Base(pkg.Path), + PackagePath: path, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenDefaulter(arguments.OutputFileBaseName, typesPkg.Path, pkg.Path, existingDefaulters, newDefaulters, peerPkgs), + } + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return t.Name.Package == typesPkg.Path + }, + }) + } + return packages +} + +// callTreeForType contains fields necessary to build a tree for types. +type callTreeForType struct { + existingDefaulters defaulterFuncMap + newDefaulters defaulterFuncMap + currentlyBuildingTypes map[*types.Type]bool +} + +func newCallTreeForType(existingDefaulters, newDefaulters defaulterFuncMap) *callTreeForType { + return &callTreeForType{ + existingDefaulters: existingDefaulters, + newDefaulters: newDefaulters, + currentlyBuildingTypes: make(map[*types.Type]bool), + } +} + +// build creates a tree of paths to fields (based on how they would be accessed in Go - pointer, elem, +// slice, or key) and the functions that should be invoked on each field. An in-order traversal of the resulting tree +// can be used to generate a Go function that invokes each nested function on the appropriate type. The return +// value may be nil if there are no functions to call on type or the type is a primitive (Defaulters can only be +// invoked on structs today). When root is true this function will not use a newDefaulter. existingDefaulters should +// contain all defaulting functions by type defined in code - newDefaulters should contain all object defaulters +// that could be or will be generated. 
If newDefaulters has an entry for a type, but the 'object' field is nil, +// this function skips adding that defaulter - this allows us to avoid generating object defaulter functions for +// list types that call empty defaulters. +func (c *callTreeForType) build(t *types.Type, root bool) *callNode { + parent := &callNode{} + + if root { + // the root node is always a pointer + parent.elem = true + } + + defaults, _ := c.existingDefaulters[t] + newDefaults, generated := c.newDefaulters[t] + switch { + case !root && generated && newDefaults.object != nil: + parent.call = append(parent.call, newDefaults.object) + // if we will be generating the defaulter, it by definition is a covering + // defaulter, so we halt recursion + klog.V(6).Infof("the defaulter %s will be generated as an object defaulter", t.Name) + return parent + + case defaults.object != nil: + // object defaulters are always covering + parent.call = append(parent.call, defaults.object) + return parent + + case defaults.base != nil: + parent.call = append(parent.call, defaults.base) + // if the base function indicates it "covers" (it already includes defaulters) + // we can halt recursion + if checkTag(defaults.base.CommentLines, "covers") { + klog.V(6).Infof("the defaulter %s indicates it covers all sub generators", t.Name) + return parent + } + } + + // base has been added already, now add any additional defaulters defined for this object + parent.call = append(parent.call, defaults.additional...) + + // if the type already exists, don't build the tree for it and don't generate anything. + // This is used to avoid recursion for nested recursive types. + if c.currentlyBuildingTypes[t] { + return nil + } + // if type doesn't exist, mark it as existing + c.currentlyBuildingTypes[t] = true + + defer func() { + // The type will now acts as a parent, not a nested recursive type. + // We can now build the tree for it safely. + c.currentlyBuildingTypes[t] = false + }() + + switch t.Kind { + case types.Pointer: + if child := c.build(t.Elem, false); child != nil { + child.elem = true + parent.children = append(parent.children, *child) + } + case types.Slice, types.Array: + if child := c.build(t.Elem, false); child != nil { + child.index = true + if t.Elem.Kind == types.Pointer { + child.elem = true + } + parent.children = append(parent.children, *child) + } + case types.Map: + if child := c.build(t.Elem, false); child != nil { + child.key = true + parent.children = append(parent.children, *child) + } + case types.Struct: + for _, field := range t.Members { + name := field.Name + if len(name) == 0 { + if field.Type.Kind == types.Pointer { + name = field.Type.Elem.Name.Name + } else { + name = field.Type.Name.Name + } + } + if child := c.build(field.Type, false); child != nil { + child.field = name + parent.children = append(parent.children, *child) + } + } + case types.Alias: + if child := c.build(t.Underlying, false); child != nil { + parent.children = append(parent.children, *child) + } + } + if len(parent.children) == 0 && len(parent.call) == 0 { + //klog.V(6).Infof("decided type %s needs no generation", t.Name) + return nil + } + return parent +} + +const ( + runtimePackagePath = "k8s.io/apimachinery/pkg/runtime" + conversionPackagePath = "k8s.io/apimachinery/pkg/conversion" +) + +// genDefaulter produces a file with a autogenerated conversions. 
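+// As an illustrative sketch only (Pod, Spec.Containers and SetDefaults_Container below
+// are hypothetical; the real names come from the objectdefaultfn namer and the package's
+// own SetDefaults_* functions), the file emitted for a type with a defaulted slice field
+// contains roughly:
+//
+//   func SetObjectDefaults_Pod(in *Pod) {
+//       for i := range in.Spec.Containers {
+//           a := &in.Spec.Containers[i]
+//           SetDefaults_Container(a)
+//       }
+//   }
+//
+//   func RegisterDefaults(scheme *runtime.Scheme) error {
+//       scheme.AddTypeDefaultingFunc(&Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*Pod)) })
+//       return nil
+//   }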
+type genDefaulter struct { + generator.DefaultGen + typesPackage string + outputPackage string + peerPackages []string + newDefaulters defaulterFuncMap + existingDefaulters defaulterFuncMap + imports namer.ImportTracker + typesForInit []*types.Type +} + +func NewGenDefaulter(sanitizedName, typesPackage, outputPackage string, existingDefaulters, newDefaulters defaulterFuncMap, peerPkgs []string) generator.Generator { + return &genDefaulter{ + DefaultGen: generator.DefaultGen{ + OptionalName: sanitizedName, + }, + typesPackage: typesPackage, + outputPackage: outputPackage, + peerPackages: peerPkgs, + newDefaulters: newDefaulters, + existingDefaulters: existingDefaulters, + imports: generator.NewImportTracker(), + typesForInit: make([]*types.Type, 0), + } +} + +func (g *genDefaulter) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genDefaulter) isOtherPackage(pkg string) bool { + if pkg == g.outputPackage { + return false + } + if strings.HasSuffix(pkg, `"`+g.outputPackage+`"`) { + return false + } + return true +} + +func (g *genDefaulter) Filter(c *generator.Context, t *types.Type) bool { + defaults, ok := g.newDefaulters[t] + if !ok || defaults.object == nil { + return false + } + g.typesForInit = append(g.typesForInit, t) + return true +} + +func (g *genDefaulter) Imports(c *generator.Context) (imports []string) { + var importLines []string + for _, singleImport := range g.imports.ImportLines() { + if g.isOtherPackage(singleImport) { + importLines = append(importLines, singleImport) + } + } + return importLines +} + +func (g *genDefaulter) Init(c *generator.Context, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + scheme := c.Universe.Type(types.Name{Package: runtimePackagePath, Name: "Scheme"}) + schemePtr := &types.Type{ + Kind: types.Pointer, + Elem: scheme, + } + sw.Do("// RegisterDefaults adds defaulters functions to the given scheme.\n", nil) + sw.Do("// Public to allow building arbitrary schemes.\n", nil) + sw.Do("// All generated defaulters are covering - they call all nested defaulters.\n", nil) + sw.Do("func RegisterDefaults(scheme $.|raw$) error {\n", schemePtr) + for _, t := range g.typesForInit { + args := defaultingArgsFromType(t) + sw.Do("scheme.AddTypeDefaultingFunc(&$.inType|raw${}, func(obj interface{}) { $.inType|objectdefaultfn$(obj.(*$.inType|raw$)) })\n", args) + } + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + return sw.Error() +} + +func (g *genDefaulter) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + if _, ok := g.newDefaulters[t]; !ok { + return nil + } + + klog.V(5).Infof("generating for type %v", t) + + callTree := newCallTreeForType(g.existingDefaulters, g.newDefaulters).build(t, true) + if callTree == nil { + klog.V(5).Infof(" no defaulters defined") + return nil + } + i := 0 + callTree.VisitInOrder(func(ancestors []*callNode, current *callNode) { + if len(current.call) == 0 { + return + } + path := callPath(append(ancestors, current)) + klog.V(5).Infof(" %d: %s", i, path) + i++ + }) + + sw := generator.NewSnippetWriter(w, c, "$", "$") + g.generateDefaulter(t, callTree, sw) + return sw.Error() +} + +func defaultingArgsFromType(inType *types.Type) generator.Args { + return generator.Args{ + "inType": inType, + } +} + +func (g *genDefaulter) generateDefaulter(inType *types.Type, callTree *callNode, sw *generator.SnippetWriter) { + 
sw.Do("func $.inType|objectdefaultfn$(in *$.inType|raw$) {\n", defaultingArgsFromType(inType)) + callTree.WriteMethod("in", 0, nil, sw) + sw.Do("}\n\n", nil) +} + +// callNode represents an entry in a tree of Go type accessors - the path from the root to a leaf represents +// how in Go code an access would be performed. For example, if a defaulting function exists on a container +// lifecycle hook, to invoke that defaulter correctly would require this Go code: +// +// for i := range pod.Spec.Containers { +// o := &pod.Spec.Containers[i] +// if o.LifecycleHook != nil { +// SetDefaults_LifecycleHook(o.LifecycleHook) +// } +// } +// +// That would be represented by a call tree like: +// +// callNode +// field: "Spec" +// children: +// - field: "Containers" +// children: +// - index: true +// children: +// - field: "LifecycleHook" +// elem: true +// call: +// - SetDefaults_LifecycleHook +// +// which we can traverse to build that Go struct (you must call the field Spec, then Containers, then range over +// that field, then check whether the LifecycleHook field is nil, before calling SetDefaults_LifecycleHook on +// the pointer to that field). +type callNode struct { + // field is the name of the Go member to access + field string + // key is true if this is a map and we must range over the key and values + key bool + // index is true if this is a slice and we must range over the slice values + index bool + // elem is true if the previous elements refer to a pointer (typically just field) + elem bool + + // call is all of the functions that must be invoked on this particular node, in order + call []*types.Type + // children is the child call nodes that must also be traversed + children []callNode +} + +// CallNodeVisitorFunc is a function for visiting a call tree. ancestors is the list of all parents +// of this node to the root of the tree - will be empty at the root. +type CallNodeVisitorFunc func(ancestors []*callNode, node *callNode) + +func (n *callNode) VisitInOrder(fn CallNodeVisitorFunc) { + n.visitInOrder(nil, fn) +} + +func (n *callNode) visitInOrder(ancestors []*callNode, fn CallNodeVisitorFunc) { + fn(ancestors, n) + ancestors = append(ancestors, n) + for i := range n.children { + n.children[i].visitInOrder(ancestors, fn) + } +} + +var ( + indexVariables = "ijklmnop" + localVariables = "abcdefgh" +) + +// varsForDepth creates temporary variables guaranteed to be unique within lexical Go scopes +// of this depth in a function. It uses canonical Go loop variables for the first 7 levels +// and then resorts to uglier prefixes. +func varsForDepth(depth int) (index, local string) { + if depth > len(indexVariables) { + index = fmt.Sprintf("i%d", depth) + } else { + index = indexVariables[depth : depth+1] + } + if depth > len(localVariables) { + local = fmt.Sprintf("local%d", depth) + } else { + local = localVariables[depth : depth+1] + } + return +} + +// writeCalls generates a list of function calls based on the calls field for the provided variable +// name and pointer. +func (n *callNode) writeCalls(varName string, isVarPointer bool, sw *generator.SnippetWriter) { + accessor := varName + if !isVarPointer { + accessor = "&" + accessor + } + for _, fn := range n.call { + sw.Do("$.fn|raw$($.var$)\n", generator.Args{ + "fn": fn, + "var": accessor, + }) + } +} + +// WriteMethod performs an in-order traversal of the calltree, generating loops and if blocks as necessary +// to correctly turn the call tree into a method body that invokes all calls on all child nodes of the call tree. 
+// Depth is used to generate local variables at the proper depth. +func (n *callNode) WriteMethod(varName string, depth int, ancestors []*callNode, sw *generator.SnippetWriter) { + // if len(n.call) > 0 { + // sw.Do(fmt.Sprintf("// %s\n", callPath(append(ancestors, n)).String()), nil) + // } + + if len(n.field) > 0 { + varName = varName + "." + n.field + } + + index, local := varsForDepth(depth) + vars := generator.Args{ + "index": index, + "local": local, + "var": varName, + } + + isPointer := n.elem && !n.index + if isPointer && len(ancestors) > 0 { + sw.Do("if $.var$ != nil {\n", vars) + } + + switch { + case n.index: + sw.Do("for $.index$ := range $.var$ {\n", vars) + if n.elem { + sw.Do("$.local$ := $.var$[$.index$]\n", vars) + } else { + sw.Do("$.local$ := &$.var$[$.index$]\n", vars) + } + + n.writeCalls(local, true, sw) + for i := range n.children { + n.children[i].WriteMethod(local, depth+1, append(ancestors, n), sw) + } + sw.Do("}\n", nil) + case n.key: + default: + n.writeCalls(varName, isPointer, sw) + for i := range n.children { + n.children[i].WriteMethod(varName, depth, append(ancestors, n), sw) + } + } + + if isPointer && len(ancestors) > 0 { + sw.Do("}\n", nil) + } +} + +type callPath []*callNode + +// String prints a representation of a callPath that roughly approximates what a Go accessor +// would look like. Used for debugging only. +func (path callPath) String() string { + if len(path) == 0 { + return "" + } + var parts []string + for _, p := range path { + last := len(parts) - 1 + switch { + case p.elem: + if len(parts) > 0 { + parts[last] = "*" + parts[last] + } else { + parts = append(parts, "*") + } + case p.index: + if len(parts) > 0 { + parts[last] = parts[last] + "[i]" + } else { + parts = append(parts, "[i]") + } + case p.key: + if len(parts) > 0 { + parts[last] = parts[last] + "[key]" + } else { + parts = append(parts, "[key]") + } + default: + if len(p.field) > 0 { + parts = append(parts, p.field) + } else { + parts = append(parts, "") + } + } + } + var calls []string + for _, fn := range path[len(path)-1].call { + calls = append(calls, fn.Name.String()) + } + if len(calls) == 0 { + calls = append(calls, "") + } + + return strings.Join(parts, ".") + " calls " + strings.Join(calls, ", ") +} diff --git a/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go b/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go new file mode 100644 index 00000000..ea5716b6 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go @@ -0,0 +1,290 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generators has the generators for the import-boss utility. 
+package generators + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +const ( + importBossFileType = "import-boss" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "raw" +} + +// Packages makes the import-boss package definition. +func Packages(c *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + pkgs := generator.Packages{} + c.FileTypes = map[string]generator.FileType{ + importBossFileType: importRuleFile{}, + } + + for _, p := range c.Universe { + if !arguments.InputIncludes(p) { + // Don't run on e.g. third party dependencies. + continue + } + savedPackage := p + pkgs = append(pkgs, &generator.DefaultPackage{ + PackageName: p.Name, + PackagePath: p.Path, + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{&importRules{ + myPackage: savedPackage, + }} + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return false + }, + }) + } + + return pkgs +} + +// A single import restriction rule. +type Rule struct { + // All import paths that match this regexp... + SelectorRegexp string + // ... must have one of these prefixes ... + AllowedPrefixes []string + // ... and must not have one of these prefixes. + ForbiddenPrefixes []string +} + +type fileFormat struct { + CurrentImports []string + + Rules []Rule +} + +func readFile(path string) (*fileFormat, error) { + currentBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("couldn't read %v: %v", path, err) + } + + var current fileFormat + err = json.Unmarshal(currentBytes, ¤t) + if err != nil { + return nil, fmt.Errorf("couldn't unmarshal %v: %v", path, err) + } + return ¤t, nil +} + +func writeFile(path string, ff *fileFormat) error { + raw, err := json.MarshalIndent(ff, "", "\t") + if err != nil { + return fmt.Errorf("couldn't format data for file %v.\n%#v", path, ff) + } + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("couldn't open %v for writing: %v", path, err) + } + defer f.Close() + _, err = f.Write(raw) + return err +} + +// This does the actual checking, since it knows the literal destination file. +type importRuleFile struct{} + +func (importRuleFile) AssembleFile(f *generator.File, path string) error { + return nil +} + +// TODO: make a flag to enable this, or expose this information in some other way. +func (importRuleFile) listEntireImportTree(f *generator.File, path string) error { + // If the file exists, populate its current imports. This is mostly to help + // humans figure out what they need to fix. + if _, err := os.Stat(path); err != nil { + // Ignore packages which haven't opted in by adding an .import-restrictions file. 
+ return nil + } + + current, err := readFile(path) + if err != nil { + return err + } + + current.CurrentImports = []string{} + for v := range f.Imports { + current.CurrentImports = append(current.CurrentImports, v) + } + sort.Strings(current.CurrentImports) + + return writeFile(path, current) +} + +// removeLastDir removes the last directory, but leaves the file name +// unchanged. It returns the new path and the removed directory. So: +// "a/b/c/file" -> ("a/b/file", "c") +func removeLastDir(path string) (newPath, removedDir string) { + dir, file := filepath.Split(path) + dir = strings.TrimSuffix(dir, string(filepath.Separator)) + return filepath.Join(filepath.Dir(dir), file), filepath.Base(dir) +} + +// Keep going up a directory until we find an .import-restrictions file. +func recursiveRead(path string) (*fileFormat, string, error) { + for { + if _, err := os.Stat(path); err == nil { + ff, err := readFile(path) + return ff, path, err + } + + nextPath, removedDir := removeLastDir(path) + if nextPath == path || removedDir == "src" { + break + } + path = nextPath + } + return nil, "", nil +} + +func (importRuleFile) VerifyFile(f *generator.File, path string) error { + rules, actualPath, err := recursiveRead(path) + if err != nil { + return fmt.Errorf("error finding rules file: %v", err) + } + + if rules == nil { + // No restrictions on this directory. + return nil + } + + forbiddenImports := map[string]string{} + allowedMismatchedImports := []string{} + for _, r := range rules.Rules { + re, err := regexp.Compile(r.SelectorRegexp) + if err != nil { + return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, actualPath, err) + } + for v := range f.Imports { + klog.V(4).Infof("Checking %v matches %v: %v\n", r.SelectorRegexp, v, re.MatchString(v)) + if !re.MatchString(v) { + continue + } + for _, forbidden := range r.ForbiddenPrefixes { + klog.V(4).Infof("Checking %v against %v\n", v, forbidden) + if strings.HasPrefix(v, forbidden) { + forbiddenImports[v] = forbidden + } + } + found := false + for _, allowed := range r.AllowedPrefixes { + klog.V(4).Infof("Checking %v against %v\n", v, allowed) + if strings.HasPrefix(v, allowed) { + found = true + break + } + } + if !found { + allowedMismatchedImports = append(allowedMismatchedImports, v) + } + } + } + + if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 { + var errorBuilder strings.Builder + for i, f := range forbiddenImports { + fmt.Fprintf(&errorBuilder, "import %v has forbidden prefix %v\n", i, f) + } + if len(allowedMismatchedImports) > 0 { + sort.Sort(sort.StringSlice(allowedMismatchedImports)) + fmt.Fprintf(&errorBuilder, "the following imports did not match any allowed prefix:\n") + for _, i := range allowedMismatchedImports { + fmt.Fprintf(&errorBuilder, " %v\n", i) + } + } + return errors.New(errorBuilder.String()) + } + if len(rules.Rules) > 0 { + klog.V(2).Infof("%v passes rules found in %v\n", path, actualPath) + } + + return nil +} + +// importRules produces a file with a set for a single type. 
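+//
+// The .import-restrictions files consumed by VerifyFile above are JSON documents shaped
+// like the fileFormat struct; an illustrative (hypothetical) example:
+//
+//   {
+//     "Rules": [
+//       {
+//         "SelectorRegexp": "k8s[.]io",
+//         "AllowedPrefixes": ["k8s.io/gengo", "k8s.io/klog"],
+//         "ForbiddenPrefixes": ["k8s.io/kubernetes"]
+//       }
+//     ]
+//   }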
+type importRules struct { + myPackage *types.Package + imports namer.ImportTracker +} + +var ( + _ = generator.Generator(&importRules{}) + _ = generator.FileType(importRuleFile{}) +) + +func (r *importRules) Name() string { return "import rules" } +func (r *importRules) Filter(*generator.Context, *types.Type) bool { return false } +func (r *importRules) Namers(*generator.Context) namer.NameSystems { return nil } +func (r *importRules) PackageVars(*generator.Context) []string { return []string{} } +func (r *importRules) PackageConsts(*generator.Context) []string { return []string{} } +func (r *importRules) GenerateType(*generator.Context, *types.Type, io.Writer) error { return nil } +func (r *importRules) Filename() string { return ".import-restrictions" } +func (r *importRules) FileType() string { return importBossFileType } +func (r *importRules) Init(c *generator.Context, w io.Writer) error { return nil } +func (r *importRules) Finalize(*generator.Context, io.Writer) error { return nil } + +func dfsImports(dest *[]string, seen map[string]bool, p *types.Package) { + for _, p2 := range p.Imports { + if seen[p2.Path] { + continue + } + seen[p2.Path] = true + dfsImports(dest, seen, p2) + *dest = append(*dest, p2.Path) + } +} + +func (r *importRules) Imports(*generator.Context) []string { + all := []string{} + dfsImports(&all, map[string]bool{}, r.myPackage) + return all +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go new file mode 100644 index 00000000..8ddce7e3 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go @@ -0,0 +1,362 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generators has the generators for the set-gen utility. +package generators + +import ( + "io" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// Packages makes the sets package definition. +func Packages(_ *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + return generator.Packages{&generator.DefaultPackage{ + PackageName: "sets", + PackagePath: arguments.OutputPackagePath, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// Package sets has auto-generated set types. +`), + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. 
+ GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + // Make a separate file for the Empty type, since it's shared by every type. + generator.DefaultGen{ + OptionalName: "empty", + OptionalBody: []byte(emptyTypeDecl), + }, + } + // Since we want a file per type that we generate a set for, we + // have to provide a function for this. + for _, t := range c.Order { + generators = append(generators, &genSet{ + DefaultGen: generator.DefaultGen{ + // Use the privatized version of the + // type name as the file name. + // + // TODO: make a namer that converts + // camelCase to '-' separation for file + // names? + OptionalName: c.Namers["private"].Name(t), + }, + outputPackage: arguments.OutputPackagePath, + typeToMatch: t, + imports: generator.NewImportTracker(), + }) + } + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + // It would be reasonable to filter by the type's package here. + // It might be necessary if your input directory has a big + // import graph. + switch t.Kind { + case types.Map, types.Slice, types.Pointer: + // These types can't be keys in a map. + return false + case types.Builtin: + return true + case types.Struct: + // Only some structs can be keys in a map. This is triggered by the line + // // +genset + // or + // // +genset=true + return extractBoolTagOrDie("genset", t.CommentLines) == true + } + return false + }, + }} +} + +// genSet produces a file with a set for a single type. +type genSet struct { + generator.DefaultGen + outputPackage string + typeToMatch *types.Type + imports namer.ImportTracker +} + +// Filter ignores all but one type because we're making a single file per type. +func (g *genSet) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } + +func (g *genSet) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genSet) Imports(c *generator.Context) (imports []string) { + return append(g.imports.ImportLines(), "reflect", "sort") +} + +// args constructs arguments for templates. Usage: +// g.args(t, "key1", value1, "key2", value2, ...) +// +// 't' is loaded with the key 'type'. +// +// We could use t directly as the argument, but doing it this way makes it easy +// to mix in additional parameters. This feature is not used in this set +// generator, but is present as an example. +func (g *genSet) args(t *types.Type, kv ...interface{}) interface{} { + m := map[interface{}]interface{}{"type": t} + for i := 0; i < len(kv)/2; i++ { + m[kv[i*2]] = kv[i*2+1] + } + return m +} + +// GenerateType makes the body of a file implementing a set for type t. +func (g *genSet) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + sw.Do(setCode, g.args(t)) + sw.Do("func less$.type|public$(lhs, rhs $.type|raw$) bool {\n", g.args(t)) + g.lessBody(sw, t) + sw.Do("}\n", g.args(t)) + return sw.Error() +} + +func (g *genSet) lessBody(sw *generator.SnippetWriter, t *types.Type) { + // TODO: make this recursive, handle pointers and multiple nested structs... 
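+ // As a rough illustration (a hypothetical struct with fields A and B), the switch
+ // below emits a comparison body equivalent to:
+ //
+ //   if lhs.A < rhs.A { return true }
+ //   if lhs.A > rhs.A { return false }
+ //   if lhs.B < rhs.B { return true }
+ //   if lhs.B > rhs.B { return false }
+ //   return false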
+ switch t.Kind { + case types.Struct: + for _, m := range types.FlattenMembers(t.Members) { + sw.Do("if lhs.$.Name$ < rhs.$.Name$ { return true }\n", m) + sw.Do("if lhs.$.Name$ > rhs.$.Name$ { return false }\n", m) + } + sw.Do("return false\n", nil) + default: + sw.Do("return lhs < rhs\n", nil) + } +} + +// written to the "empty.go" file. +var emptyTypeDecl = ` +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} +` + +// Written for every type. If you've never used text/template before: +// $.type$ refers to the source type; |public means to +// call the function giving the public name, |raw the raw type name. +var setCode = `// sets.$.type|public$ is a set of $.type|raw$s, implemented via map[$.type|raw$]struct{} for minimal memory consumption. +type $.type|public$ map[$.type|raw$]Empty + +// New$.type|public$ creates a $.type|public$ from a list of values. +func New$.type|public$(items ...$.type|raw$) $.type|public$ { + ss := $.type|public${} + ss.Insert(items...) + return ss +} + +// $.type|public$KeySet creates a $.type|public$ from a keys of a map[$.type|raw$](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func $.type|public$KeySet(theMap interface{}) $.type|public$ { + v := reflect.ValueOf(theMap) + ret := $.type|public${} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().($.type|raw$)) + } + return ret +} + +// Insert adds items to the set. +func (s $.type|public$) Insert(items ...$.type|raw$) $.type|public$ { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s $.type|public$) Delete(items ...$.type|raw$) $.type|public$ { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s $.type|public$) Has(item $.type|raw$) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s $.type|public$) HasAll(items ...$.type|raw$) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s $.type|public$) HasAny(items ...$.type|raw$) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s $.type|public$) Difference(s2 $.type|public$) $.type|public$ { + result := New$.type|public$() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 $.type|public$) Union(s2 $.type|public$) $.type|public$ { + result := New$.type|public$() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 $.type|public$) Intersection(s2 $.type|public$) $.type|public$ { + var walk, other $.type|public$ + result := New$.type|public$() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 $.type|public$) IsSuperset(s2 $.type|public$) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 $.type|public$) Equal(s2 $.type|public$) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOf$.type|public$ []$.type|raw$ + +func (s sortableSliceOf$.type|public$) Len() int { return len(s) } +func (s sortableSliceOf$.type|public$) Less(i, j int) bool { return less$.type|public$(s[i], s[j]) } +func (s sortableSliceOf$.type|public$) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted $.type|raw$ slice. +func (s $.type|public$) List() []$.type|raw$ { + res := make(sortableSliceOf$.type|public$, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []$.type|raw$(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s $.type|public$) UnsortedList() []$.type|raw$ { + res :=make([]$.type|raw$, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s $.type|public$) PopAny() ($.type|raw$, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue $.type|raw$ + return zeroValue, false +} + +// Len returns the size of the set. +func (s $.type|public$) Len() int { + return len(s) +} + +` diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go b/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go new file mode 100644 index 00000000..bb3b4d25 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if +// it exists, the value is boolean. 
If the tag did not exist, it returns +// false. +func extractBoolTagOrDie(key string, lines []string) bool { + val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + if err != nil { + klog.Fatalf(err.Error()) + } + return val +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go b/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go new file mode 100644 index 00000000..9bfa85d4 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +type Byte map[byte]Empty + +// NewByte creates a Byte from a list of values. +func NewByte(items ...byte) Byte { + ss := Byte{} + ss.Insert(items...) + return ss +} + +// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func ByteKeySet(theMap interface{}) Byte { + v := reflect.ValueOf(theMap) + ret := Byte{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(byte)) + } + return ret +} + +// Insert adds items to the set. +func (s Byte) Insert(items ...byte) Byte { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Byte) Delete(items ...byte) Byte { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Byte) Has(item byte) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Byte) HasAll(items ...byte) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Byte) HasAny(items ...byte) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Byte) Difference(s2 Byte) Byte { + result := NewByte() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Byte) Union(s2 Byte) Byte { + result := NewByte() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Byte) Intersection(s2 Byte) Byte { + var walk, other Byte + result := NewByte() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Byte) IsSuperset(s2 Byte) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Byte) Equal(s2 Byte) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfByte []byte + +func (s sortableSliceOfByte) Len() int { return len(s) } +func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } +func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted byte slice. +func (s Byte) List() []byte { + res := make(sortableSliceOfByte, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []byte(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Byte) UnsortedList() []byte { + res := make([]byte, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Byte) PopAny() (byte, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue byte + return zeroValue, false +} + +// Len returns the size of the set. +func (s Byte) Len() int { + return len(s) +} + +func lessByte(lhs, rhs byte) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go b/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go new file mode 100644 index 00000000..b152a0bf --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go b/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go new file mode 100644 index 00000000..e11e622c --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int.go new file mode 100644 index 00000000..88bd7096 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/int.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +type Int map[int]Empty + +// NewInt creates a Int from a list of values. +func NewInt(items ...int) Int { + ss := Int{} + ss.Insert(items...) + return ss +} + +// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func IntKeySet(theMap interface{}) Int { + v := reflect.ValueOf(theMap) + ret := Int{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int)) + } + return ret +} + +// Insert adds items to the set. +func (s Int) Insert(items ...int) Int { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int) Delete(items ...int) Int { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int) Has(item int) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int) HasAll(items ...int) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int) HasAny(items ...int) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int) Difference(s2 Int) Int { + result := NewInt() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int) Union(s2 Int) Int { + result := NewInt() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int) Intersection(s2 Int) Int { + var walk, other Int + result := NewInt() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int) IsSuperset(s2 Int) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int) Equal(s2 Int) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt []int + +func (s sortableSliceOfInt) Len() int { return len(s) } +func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } +func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int slice. +func (s Int) List() []int { + res := make(sortableSliceOfInt, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + res := make([]int, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int) PopAny() (int, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int) Len() int { + return len(s) +} + +func lessInt(lhs, rhs int) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go new file mode 100644 index 00000000..b375a1b0 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +type Int64 map[int64]Empty + +// NewInt64 creates a Int64 from a list of values. +func NewInt64(items ...int64) Int64 { + ss := Int64{} + ss.Insert(items...) + return ss +} + +// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int64KeySet(theMap interface{}) Int64 { + v := reflect.ValueOf(theMap) + ret := Int64{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int64)) + } + return ret +} + +// Insert adds items to the set. +func (s Int64) Insert(items ...int64) Int64 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int64) Delete(items ...int64) Int64 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int64) Has(item int64) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int64) HasAll(items ...int64) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int64) HasAny(items ...int64) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int64) Difference(s2 Int64) Int64 { + result := NewInt64() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int64) Union(s2 Int64) Int64 { + result := NewInt64() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int64) Intersection(s2 Int64) Int64 { + var walk, other Int64 + result := NewInt64() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int64) IsSuperset(s2 Int64) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s1 Int64) Equal(s2 Int64) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt64 []int64 + +func (s sortableSliceOfInt64) Len() int { return len(s) } +func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } +func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int64 slice. +func (s Int64) List() []int64 { + res := make(sortableSliceOfInt64, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int64(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + res := make([]int64, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int64) PopAny() (int64, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int64 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int64) Len() int { + return len(s) +} + +func lessInt64(lhs, rhs int64) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/string.go b/vendor/k8s.io/gengo/examples/set-gen/sets/string.go new file mode 100644 index 00000000..e6f37db8 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/string.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) String { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) String { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/generator/default_generator.go b/vendor/k8s.io/gengo/generator/default_generator.go new file mode 100644 index 00000000..f9476682 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/default_generator.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "io" + + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +const ( + GolangFileType = "golang" +) + +// DefaultGen implements a do-nothing Generator. +// +// It can be used to implement static content files. +type DefaultGen struct { + // OptionalName, if present, will be used for the generator's name, and + // the filename (with ".go" appended). + OptionalName string + + // OptionalBody, if present, will be used as the return from the "Init" + // method. This causes it to be static content for the entire file if + // no other generator touches the file. + OptionalBody []byte +} + +func (d DefaultGen) Name() string { return d.OptionalName } +func (d DefaultGen) Filter(*Context, *types.Type) bool { return true } +func (d DefaultGen) Namers(*Context) namer.NameSystems { return nil } +func (d DefaultGen) Imports(*Context) []string { return []string{} } +func (d DefaultGen) PackageVars(*Context) []string { return []string{} } +func (d DefaultGen) PackageConsts(*Context) []string { return []string{} } +func (d DefaultGen) GenerateType(*Context, *types.Type, io.Writer) error { return nil } +func (d DefaultGen) Filename() string { return d.OptionalName + ".go" } +func (d DefaultGen) FileType() string { return GolangFileType } +func (d DefaultGen) Finalize(*Context, io.Writer) error { return nil } + +func (d DefaultGen) Init(c *Context, w io.Writer) error { + _, err := w.Write(d.OptionalBody) + return err +} + +var ( + _ = Generator(DefaultGen{}) +) diff --git a/vendor/k8s.io/gengo/generator/default_package.go b/vendor/k8s.io/gengo/generator/default_package.go new file mode 100644 index 00000000..11517fc6 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/default_package.go @@ -0,0 +1,72 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "k8s.io/gengo/types" +) + +// DefaultPackage contains a default implementation of Package. +type DefaultPackage struct { + // Short name of package, used in the "package xxxx" line. + PackageName string + // Import path of the package, and the location on disk of the package. + PackagePath string + + // Emitted at the top of every file. + HeaderText []byte + + // Emitted only for a "doc.go" file; appended to the HeaderText for + // that file. + PackageDocumentation []byte + + // If non-nil, will be called on "Generators"; otherwise, the static + // list will be used. So you should set only one of these two fields. + GeneratorFunc func(*Context) []Generator + GeneratorList []Generator + + // Optional; filters the types exposed to the generators. 
+ FilterFunc func(*Context, *types.Type) bool +} + +func (d *DefaultPackage) Name() string { return d.PackageName } +func (d *DefaultPackage) Path() string { return d.PackagePath } + +func (d *DefaultPackage) Filter(c *Context, t *types.Type) bool { + if d.FilterFunc != nil { + return d.FilterFunc(c, t) + } + return true +} + +func (d *DefaultPackage) Generators(c *Context) []Generator { + if d.GeneratorFunc != nil { + return d.GeneratorFunc(c) + } + return d.GeneratorList +} + +func (d *DefaultPackage) Header(filename string) []byte { + if filename == "doc.go" { + return append(d.HeaderText, d.PackageDocumentation...) + } + return d.HeaderText +} + +var ( + _ = Package(&DefaultPackage{}) +) diff --git a/vendor/k8s.io/gengo/generator/doc.go b/vendor/k8s.io/gengo/generator/doc.go new file mode 100644 index 00000000..d8e12534 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/doc.go @@ -0,0 +1,31 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generator defines an interface for code generators to implement. +// +// To use this package, you'll implement the "Package" and "Generator" +// interfaces; you'll call NewContext to load up the types you want to work +// with, and then you'll call one or more of the Execute methods. See the +// interface definitions for explanations. All output will have gofmt called on +// it automatically, so you do not need to worry about generating correct +// indentation. +// +// This package also exposes SnippetWriter. SnippetWriter reduces to a minimum +// the boilerplate involved in setting up a template from go's text/template +// package. Additionally, all naming systems in the Context will be added as +// functions to the parsed template, so that they can be called directly from +// your templates! +package generator // import "k8s.io/gengo/generator" diff --git a/vendor/k8s.io/gengo/generator/error_tracker.go b/vendor/k8s.io/gengo/generator/error_tracker.go new file mode 100644 index 00000000..964dae37 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/error_tracker.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "io" +) + +// ErrorTracker tracks errors to the underlying writer, so that you can ignore +// them until you're ready to return. +type ErrorTracker struct { + io.Writer + err error +} + +// NewErrorTracker makes a new error tracker; note that it implements io.Writer. 
+func NewErrorTracker(w io.Writer) *ErrorTracker { + return &ErrorTracker{Writer: w} +} + +// Write intercepts calls to Write. +func (et *ErrorTracker) Write(p []byte) (n int, err error) { + if et.err != nil { + return 0, et.err + } + n, err = et.Writer.Write(p) + if err != nil { + et.err = err + } + return n, err +} + +// Error returns nil if no error has occurred, otherwise it returns the error. +func (et *ErrorTracker) Error() error { + return et.err +} diff --git a/vendor/k8s.io/gengo/generator/execute.go b/vendor/k8s.io/gengo/generator/execute.go new file mode 100644 index 00000000..b5f5aaeb --- /dev/null +++ b/vendor/k8s.io/gengo/generator/execute.go @@ -0,0 +1,312 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/imports" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +func errs2strings(errors []error) []string { + strs := make([]string, len(errors)) + for i := range errors { + strs[i] = errors[i].Error() + } + return strs +} + +// ExecutePackages runs the generators for every package in 'packages'. 'outDir' +// is the base directory in which to place all the generated packages; it +// should be a physical path on disk, not an import path. e.g.: +// /path/to/home/path/to/gopath/src/ +// Each package has its import path already, this will be appended to 'outDir'. +func (c *Context) ExecutePackages(outDir string, packages Packages) error { + var errors []error + for _, p := range packages { + if err := c.ExecutePackage(outDir, p); err != nil { + errors = append(errors, err) + } + } + if len(errors) > 0 { + return fmt.Errorf("some packages had errors:\n%v\n", strings.Join(errs2strings(errors), "\n")) + } + return nil +} + +type DefaultFileType struct { + Format func([]byte) ([]byte, error) + Assemble func(io.Writer, *File) +} + +func (ft DefaultFileType) AssembleFile(f *File, pathname string) error { + klog.V(2).Infof("Assembling file %q", pathname) + destFile, err := os.Create(pathname) + if err != nil { + return err + } + defer destFile.Close() + + b := &bytes.Buffer{} + et := NewErrorTracker(b) + ft.Assemble(et, f) + if et.Error() != nil { + return et.Error() + } + if formatted, err := ft.Format(b.Bytes()); err != nil { + err = fmt.Errorf("unable to format file %q (%v).", pathname, err) + // Write the file anyway, so they can see what's going wrong and fix the generator. 
+ if _, err2 := destFile.Write(b.Bytes()); err2 != nil { + return err2 + } + return err + } else { + _, err = destFile.Write(formatted) + return err + } +} + +func (ft DefaultFileType) VerifyFile(f *File, pathname string) error { + klog.V(2).Infof("Verifying file %q", pathname) + friendlyName := filepath.Join(f.PackageName, f.Name) + b := &bytes.Buffer{} + et := NewErrorTracker(b) + ft.Assemble(et, f) + if et.Error() != nil { + return et.Error() + } + formatted, err := ft.Format(b.Bytes()) + if err != nil { + return fmt.Errorf("unable to format the output for %q: %v", friendlyName, err) + } + existing, err := ioutil.ReadFile(pathname) + if err != nil { + return fmt.Errorf("unable to read file %q for comparison: %v", friendlyName, err) + } + if bytes.Compare(formatted, existing) == 0 { + return nil + } + // Be nice and find the first place where they differ + i := 0 + for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] { + i++ + } + eDiff, fDiff := existing[i:], formatted[i:] + if len(eDiff) > 100 { + eDiff = eDiff[:100] + } + if len(fDiff) > 100 { + fDiff = fDiff[:100] + } + return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", friendlyName, string(eDiff), string(fDiff)) +} + +func assembleGolangFile(w io.Writer, f *File) { + w.Write(f.Header) + fmt.Fprintf(w, "package %v\n\n", f.PackageName) + + if len(f.Imports) > 0 { + fmt.Fprint(w, "import (\n") + for i := range f.Imports { + if strings.Contains(i, "\"") { + // they included quotes, or are using the + // `name "path/to/pkg"` format. + fmt.Fprintf(w, "\t%s\n", i) + } else { + fmt.Fprintf(w, "\t%q\n", i) + } + } + fmt.Fprint(w, ")\n\n") + } + + if f.Vars.Len() > 0 { + fmt.Fprint(w, "var (\n") + w.Write(f.Vars.Bytes()) + fmt.Fprint(w, ")\n\n") + } + + if f.Consts.Len() > 0 { + fmt.Fprint(w, "const (\n") + w.Write(f.Consts.Bytes()) + fmt.Fprint(w, ")\n\n") + } + + w.Write(f.Body.Bytes()) +} + +func importsWrapper(src []byte) ([]byte, error) { + return imports.Process("", src, nil) +} + +func NewGolangFile() *DefaultFileType { + return &DefaultFileType{ + Format: importsWrapper, + Assemble: assembleGolangFile, + } +} + +// format should be one line only, and not end with \n. +func addIndentHeaderComment(b *bytes.Buffer, format string, args ...interface{}) { + if b.Len() > 0 { + fmt.Fprintf(b, "\n// "+format+"\n", args...) + } else { + fmt.Fprintf(b, "// "+format+"\n", args...) + } +} + +func (c *Context) filteredBy(f func(*Context, *types.Type) bool) *Context { + c2 := *c + c2.Order = []*types.Type{} + for _, t := range c.Order { + if f(c, t) { + c2.Order = append(c2.Order, t) + } + } + return &c2 +} + +// make a new context; inheret c.Namers, but add on 'namers'. In case of a name +// collision, the namer in 'namers' wins. +func (c *Context) addNameSystems(namers namer.NameSystems) *Context { + if namers == nil { + return c + } + c2 := *c + // Copy the existing name systems so we don't corrupt a parent context + c2.Namers = namer.NameSystems{} + for k, v := range c.Namers { + c2.Namers[k] = v + } + + for name, namer := range namers { + c2.Namers[name] = namer + } + return &c2 +} + +// ExecutePackage executes a single package. 'outDir' is the base directory in +// which to place the package; it should be a physical path on disk, not an +// import path. e.g.: '/path/to/home/path/to/gopath/src/' The package knows its +// import path already, this will be appended to 'outDir'. 
+func (c *Context) ExecutePackage(outDir string, p Package) error { + path := filepath.Join(outDir, p.Path()) + klog.V(2).Infof("Processing package %q, disk location %q", p.Name(), path) + // Filter out any types the *package* doesn't care about. + packageContext := c.filteredBy(p.Filter) + os.MkdirAll(path, 0755) + files := map[string]*File{} + for _, g := range p.Generators(packageContext) { + // Filter out types the *generator* doesn't care about. + genContext := packageContext.filteredBy(g.Filter) + // Now add any extra name systems defined by this generator + genContext = genContext.addNameSystems(g.Namers(genContext)) + + fileType := g.FileType() + if len(fileType) == 0 { + return fmt.Errorf("generator %q must specify a file type", g.Name()) + } + f := files[g.Filename()] + if f == nil { + // This is the first generator to reference this file, so start it. + f = &File{ + Name: g.Filename(), + FileType: fileType, + PackageName: p.Name(), + Header: p.Header(g.Filename()), + Imports: map[string]struct{}{}, + } + files[f.Name] = f + } else { + if f.FileType != g.FileType() { + return fmt.Errorf("file %q already has type %q, but generator %q wants to use type %q", f.Name, f.FileType, g.Name(), g.FileType()) + } + } + + if vars := g.PackageVars(genContext); len(vars) > 0 { + addIndentHeaderComment(&f.Vars, "Package-wide variables from generator %q.", g.Name()) + for _, v := range vars { + if _, err := fmt.Fprintf(&f.Vars, "%s\n", v); err != nil { + return err + } + } + } + if consts := g.PackageConsts(genContext); len(consts) > 0 { + addIndentHeaderComment(&f.Consts, "Package-wide consts from generator %q.", g.Name()) + for _, v := range consts { + if _, err := fmt.Fprintf(&f.Consts, "%s\n", v); err != nil { + return err + } + } + } + if err := genContext.executeBody(&f.Body, g); err != nil { + return err + } + if imports := g.Imports(genContext); len(imports) > 0 { + for _, i := range imports { + f.Imports[i] = struct{}{} + } + } + } + + var errors []error + for _, f := range files { + finalPath := filepath.Join(path, f.Name) + assembler, ok := c.FileTypes[f.FileType] + if !ok { + return fmt.Errorf("the file type %q registered for file %q does not exist in the context", f.FileType, f.Name) + } + var err error + if c.Verify { + err = assembler.VerifyFile(f, finalPath) + } else { + err = assembler.AssembleFile(f, finalPath) + } + if err != nil { + errors = append(errors, err) + } + } + if len(errors) > 0 { + return fmt.Errorf("errors in package %q:\n%v\n", p.Path(), strings.Join(errs2strings(errors), "\n")) + } + return nil +} + +func (c *Context) executeBody(w io.Writer, generator Generator) error { + et := NewErrorTracker(w) + if err := generator.Init(c, et); err != nil { + return err + } + for _, t := range c.Order { + if err := generator.GenerateType(c, t, et); err != nil { + return err + } + } + if err := generator.Finalize(c, et); err != nil { + return err + } + return et.Error() +} diff --git a/vendor/k8s.io/gengo/generator/generator.go b/vendor/k8s.io/gengo/generator/generator.go new file mode 100644 index 00000000..05a3f65f --- /dev/null +++ b/vendor/k8s.io/gengo/generator/generator.go @@ -0,0 +1,219 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "bytes" + "io" + + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" + "k8s.io/gengo/types" +) + +// Package contains the contract for generating a package. +type Package interface { + // Name returns the package short name. + Name() string + // Path returns the package import path. + Path() string + + // Filter should return true if this package cares about this type. + // Otherwise, this type will be omitted from the type ordering for + // this package. + Filter(*Context, *types.Type) bool + + // Header should return a header for the file, including comment markers. + // Useful for copyright notices and doc strings. Include an + // autogeneration notice! Do not include the "package x" line. + Header(filename string) []byte + + // Generators returns the list of generators for this package. It is + // allowed for more than one generator to write to the same file. + // A Context is passed in case the list of generators depends on the + // input types. + Generators(*Context) []Generator +} + +type File struct { + Name string + FileType string + PackageName string + Header []byte + Imports map[string]struct{} + Vars bytes.Buffer + Consts bytes.Buffer + Body bytes.Buffer +} + +type FileType interface { + AssembleFile(f *File, path string) error + VerifyFile(f *File, path string) error +} + +// Packages is a list of packages to generate. +type Packages []Package + +// Generator is the contract for anything that wants to do auto-generation. +// It's expected that the io.Writers passed to the below functions will be +// ErrorTrackers; this allows implementations to not check for io errors, +// making more readable code. +// +// The call order for the functions that take a Context is: +// 1. Filter() // Subsequent calls see only types that pass this. +// 2. Namers() // Subsequent calls see the namers provided by this. +// 3. PackageVars() +// 4. PackageConsts() +// 5. Init() +// 6. GenerateType() // Called N times, once per type in the context's Order. +// 7. Imports() +// +// You may have multiple generators for the same file. +type Generator interface { + // The name of this generator. Will be included in generated comments. + Name() string + + // Filter should return true if this generator cares about this type. + // (otherwise, GenerateType will not be called.) + // + // Filter is called before any of the generator's other functions; + // subsequent calls will get a context with only the types that passed + // this filter. + Filter(*Context, *types.Type) bool + + // If this generator needs special namers, return them here. These will + // override the original namers in the context if there is a collision. + // You may return nil if you don't need special names. These names will + // be available in the context passed to the rest of the generator's + // functions. + // + // A use case for this is to return a namer that tracks imports. + Namers(*Context) namer.NameSystems + + // Init should write an init function, and any other content that's not + // generated per-type. (It's not intended for generator specific + // initialization! 
Do that when your Package constructs the + // Generators.) + Init(*Context, io.Writer) error + + // Finalize should write finish up functions, and any other content that's not + // generated per-type. + Finalize(*Context, io.Writer) error + + // PackageVars should emit an array of variable lines. They will be + // placed in a var ( ... ) block. There's no need to include a leading + // \t or trailing \n. + PackageVars(*Context) []string + + // PackageConsts should emit an array of constant lines. They will be + // placed in a const ( ... ) block. There's no need to include a leading + // \t or trailing \n. + PackageConsts(*Context) []string + + // GenerateType should emit the code for a particular type. + GenerateType(*Context, *types.Type, io.Writer) error + + // Imports should return a list of necessary imports. They will be + // formatted correctly. You do not need to include quotation marks, + // return only the package name; alternatively, you can also return + // imports in the format `name "path/to/pkg"`. Imports will be called + // after Init, PackageVars, PackageConsts, and GenerateType, to allow + // you to keep track of what imports you actually need. + Imports(*Context) []string + + // Preferred file name of this generator, not including a path. It is + // allowed for multiple generators to use the same filename, but it's + // up to you to make sure they don't have colliding import names. + // TODO: provide per-file import tracking, removing the requirement + // that generators coordinate.. + Filename() string + + // A registered file type in the context to generate this file with. If + // the FileType is not found in the context, execution will stop. + FileType() string +} + +// Context is global context for individual generators to consume. +type Context struct { + // A map from the naming system to the names for that system. E.g., you + // might have public names and several private naming systems. + Namers namer.NameSystems + + // All the types, in case you want to look up something. + Universe types.Universe + + // All the user-specified packages. This is after recursive expansion. + Inputs []string + + // The canonical ordering of the types (will be filtered by both the + // Package's and Generator's Filter methods). + Order []*types.Type + + // A set of types this context can process. If this is empty or nil, + // the default "golang" filetype will be provided. + FileTypes map[string]FileType + + // If true, Execute* calls will just verify that the existing output is + // correct. (You may set this after calling NewContext.) + Verify bool + + // Allows generators to add packages at runtime. + builder *parser.Builder +} + +// NewContext generates a context from the given builder, naming systems, and +// the naming system you wish to construct the canonical ordering from. +func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrderName string) (*Context, error) { + universe, err := b.FindTypes() + if err != nil { + return nil, err + } + + c := &Context{ + Namers: namer.NameSystems{}, + Universe: universe, + Inputs: b.FindPackages(), + FileTypes: map[string]FileType{ + GolangFileType: NewGolangFile(), + }, + builder: b, + } + + for name, systemNamer := range nameSystems { + c.Namers[name] = systemNamer + if name == canonicalOrderName { + orderer := namer.Orderer{Namer: systemNamer} + c.Order = orderer.OrderUniverse(universe) + } + } + return c, nil +} + +// AddDir adds a Go package to the context. 
The specified path must be a single +// go package import path. GOPATH, GOROOT, and the location of your go binary +// (`which go`) will all be searched, in the normal Go fashion. +// Deprecated. Please use AddDirectory. +func (ctxt *Context) AddDir(path string) error { + return ctxt.builder.AddDirTo(path, &ctxt.Universe) +} + +// AddDirectory adds a Go package to the context. The specified path must be a +// single go package import path. GOPATH, GOROOT, and the location of your go +// binary (`which go`) will all be searched, in the normal Go fashion. +func (ctxt *Context) AddDirectory(path string) (*types.Package, error) { + return ctxt.builder.AddDirectoryTo(path, &ctxt.Universe) +} diff --git a/vendor/k8s.io/gengo/generator/import_tracker.go b/vendor/k8s.io/gengo/generator/import_tracker.go new file mode 100644 index 00000000..5d058410 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/import_tracker.go @@ -0,0 +1,64 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "strings" + + "k8s.io/klog" + + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +func NewImportTracker(typesToAdd ...*types.Type) namer.ImportTracker { + tracker := namer.NewDefaultImportTracker(types.Name{}) + tracker.IsInvalidType = func(*types.Type) bool { return false } + tracker.LocalName = func(name types.Name) string { return golangTrackerLocalName(&tracker, name) } + tracker.PrintImport = func(path, name string) string { return name + " \"" + path + "\"" } + + tracker.AddTypes(typesToAdd...) + return &tracker + +} + +func golangTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { + path := t.Package + + // Using backslashes in package names causes gengo to produce Go code which + // will not compile with the gc compiler. See the comment on GoSeperator. + if strings.ContainsRune(path, '\\') { + klog.Warningf("Warning: backslash used in import path '%v', this is unsupported.\n", path) + } + + dirs := strings.Split(path, namer.GoSeperator) + for n := len(dirs) - 1; n >= 0; n-- { + // follow kube convention of not having anything between directory names + name := strings.Join(dirs[n:], "") + name = strings.Replace(name, "_", "", -1) + // These characters commonly appear in import paths for go + // packages, but aren't legal go names. So we'll sanitize. + name = strings.Replace(name, ".", "", -1) + name = strings.Replace(name, "-", "", -1) + if _, found := tracker.PathOf(name); found { + // This name collides with some other package + continue + } + return name + } + panic("can't find import for " + path) +} diff --git a/vendor/k8s.io/gengo/generator/snippet_writer.go b/vendor/k8s.io/gengo/generator/snippet_writer.go new file mode 100644 index 00000000..eae917c1 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/snippet_writer.go @@ -0,0 +1,154 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "fmt" + "io" + "runtime" + "text/template" +) + +// SnippetWriter is an attempt to make the template library usable. +// Methods are chainable, and you don't have to check Error() until you're all +// done. +type SnippetWriter struct { + w io.Writer + context *Context + // Left & right delimiters. text/template defaults to "{{" and "}}" + // which is totally unusable for go code based templates. + left, right string + funcMap template.FuncMap + err error +} + +// w is the destination; left and right are the delimiters; @ and $ are both +// reasonable choices. +// +// c is used to make a function for every naming system, to which you can pass +// a type and get the corresponding name. +func NewSnippetWriter(w io.Writer, c *Context, left, right string) *SnippetWriter { + sw := &SnippetWriter{ + w: w, + context: c, + left: left, + right: right, + funcMap: template.FuncMap{}, + } + for name, namer := range c.Namers { + sw.funcMap[name] = namer.Name + } + return sw +} + +// Do parses format and runs args through it. You can have arbitrary logic in +// the format (see the text/template documentation), but consider running many +// short templaces, with ordinary go logic in between--this may be more +// readable. Do is chainable. Any error causes every other call to do to be +// ignored, and the error will be returned by Error(). So you can check it just +// once, at the end of your function. +// +// 'args' can be quite literally anything; read the text/template documentation +// for details. Maps and structs work particularly nicely. Conveniently, the +// types package is designed to have structs that are easily referencable from +// the template language. +// +// Example: +// +// sw := generator.NewSnippetWriter(outBuffer, context, "$", "$") +// sw.Do(`The public type name is: $.type|public$`, map[string]interface{}{"type": t}) +// return sw.Error() +// +// Where: +// * "$" starts a template directive +// * "." references the entire thing passed as args +// * "type" therefore sees a map and looks up the key "type" +// * "|" means "pass the thing on the left to the thing on the right" +// * "public" is the name of a naming system, so the SnippetWriter has given +// the template a function called "public" that takes a *types.Type and +// returns the naming system's name. E.g., if the type is "string" this might +// return "String". +// * the second "$" ends the template directive. +// +// The map is actually not necessary. The below does the same thing: +// +// sw.Do(`The public type name is: $.|public$`, t) +// +// You may or may not find it more readable to use the map with a descriptive +// key, but if you want to pass more than one arg, the map or a custom struct +// becomes a requirement. You can do arbitrary logic inside these templates, +// but you should consider doing the logic in go and stitching them together +// for the sake of your readers. +// +// TODO: Change Do() to optionally take a list of pairs of parameters (key, value) +// and have it construct a combined map with that and args. 
+func (s *SnippetWriter) Do(format string, args interface{}) *SnippetWriter { + if s.err != nil { + return s + } + // Name the template by source file:line so it can be found when + // there's an error. + _, file, line, _ := runtime.Caller(1) + tmpl, err := template. + New(fmt.Sprintf("%s:%d", file, line)). + Delims(s.left, s.right). + Funcs(s.funcMap). + Parse(format) + if err != nil { + s.err = err + return s + } + err = tmpl.Execute(s.w, args) + if err != nil { + s.err = err + } + return s +} + +// Args exists to make it convenient to construct arguments for +// SnippetWriter.Do. +type Args map[interface{}]interface{} + +// With makes a copy of a and adds the given key, value pair. +func (a Args) With(key, value interface{}) Args { + a2 := Args{key: value} + for k, v := range a { + a2[k] = v + } + return a2 +} + +// WithArgs makes a copy of a and adds the given arguments. +func (a Args) WithArgs(rhs Args) Args { + a2 := Args{} + for k, v := range rhs { + a2[k] = v + } + for k, v := range a { + a2[k] = v + } + return a2 +} + +func (s *SnippetWriter) Out() io.Writer { + return s.w +} + +// Error returns any encountered error. +func (s *SnippetWriter) Error() error { + return s.err +} diff --git a/vendor/k8s.io/gengo/namer/doc.go b/vendor/k8s.io/gengo/namer/doc.go new file mode 100644 index 00000000..8a44ea99 --- /dev/null +++ b/vendor/k8s.io/gengo/namer/doc.go @@ -0,0 +1,31 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package namer has support for making different type naming systems. +// +// This is because sometimes you want to refer to the literal type, sometimes +// you want to make a name for the thing you're generating, and you want to +// make the name based on the type. For example, if you have `type foo string`, +// you want to be able to generate something like `func FooPrinter(f *foo) { +// Print(string(*f)) }`; that is, you want to refer to a public name, a literal +// name, and the underlying literal name. +// +// This package supports the idea of a "Namer" and a set of "NameSystems" to +// support these use cases. +// +// Additionally, a "RawNamer" can optionally keep track of what needs to be +// imported. +package namer // import "k8s.io/gengo/namer" diff --git a/vendor/k8s.io/gengo/namer/import_tracker.go b/vendor/k8s.io/gengo/namer/import_tracker.go new file mode 100644 index 00000000..37094b2d --- /dev/null +++ b/vendor/k8s.io/gengo/namer/import_tracker.go @@ -0,0 +1,112 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "sort" + + "k8s.io/gengo/types" +) + +// ImportTracker may be passed to a namer.RawNamer, to track the imports needed +// for the types it names. +// +// TODO: pay attention to the package name (instead of renaming every package). +type DefaultImportTracker struct { + pathToName map[string]string + // forbidden names are in here. (e.g. "go" is a directory in which + // there is code, but "go" is not a legal name for a package, so we put + // it here to prevent us from naming any package "go") + nameToPath map[string]string + local types.Name + + // Returns true if a given types is an invalid type and should be ignored. + IsInvalidType func(*types.Type) bool + // Returns the final local name for the given name + LocalName func(types.Name) string + // Returns the "import" line for a given (path, name). + PrintImport func(string, string) string +} + +func NewDefaultImportTracker(local types.Name) DefaultImportTracker { + return DefaultImportTracker{ + pathToName: map[string]string{}, + nameToPath: map[string]string{}, + local: local, + } +} + +func (tracker *DefaultImportTracker) AddTypes(types ...*types.Type) { + for _, t := range types { + tracker.AddType(t) + } +} +func (tracker *DefaultImportTracker) AddType(t *types.Type) { + if tracker.local.Package == t.Name.Package { + return + } + + if tracker.IsInvalidType(t) { + if t.Kind == types.Builtin { + return + } + if _, ok := tracker.nameToPath[t.Name.Package]; !ok { + tracker.nameToPath[t.Name.Package] = "" + } + return + } + + if len(t.Name.Package) == 0 { + return + } + path := t.Name.Path + if len(path) == 0 { + path = t.Name.Package + } + if _, ok := tracker.pathToName[path]; ok { + return + } + name := tracker.LocalName(t.Name) + tracker.nameToPath[name] = path + tracker.pathToName[path] = name +} + +func (tracker *DefaultImportTracker) ImportLines() []string { + importPaths := []string{} + for path := range tracker.pathToName { + importPaths = append(importPaths, path) + } + sort.Sort(sort.StringSlice(importPaths)) + out := []string{} + for _, path := range importPaths { + out = append(out, tracker.PrintImport(path, tracker.pathToName[path])) + } + return out +} + +// LocalNameOf returns the name you would use to refer to the package at the +// specified path within the body of a file. +func (tracker *DefaultImportTracker) LocalNameOf(path string) string { + return tracker.pathToName[path] +} + +// PathOf returns the path that a given localName is referring to within the +// body of a file. +func (tracker *DefaultImportTracker) PathOf(localName string) (string, bool) { + name, ok := tracker.nameToPath[localName] + return name, ok +} diff --git a/vendor/k8s.io/gengo/namer/namer.go b/vendor/k8s.io/gengo/namer/namer.go new file mode 100644 index 00000000..d700a00a --- /dev/null +++ b/vendor/k8s.io/gengo/namer/namer.go @@ -0,0 +1,383 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package namer + +import ( + "path/filepath" + "strings" + + "k8s.io/gengo/types" +) + +const ( + // GoSeperator is used to split go import paths. + // Forward slash is used instead of filepath.Seperator because it is the + // only universally-accepted path delimiter and the only delimiter not + // potentially forbidden by Go compilers. (In particular gc does not allow + // the use of backslashes in import paths.) + // See https://golang.org/ref/spec#Import_declarations. + // See also https://github.com/kubernetes/gengo/issues/83#issuecomment-367040772. + GoSeperator = "/" +) + +// Returns whether a name is a private Go name. +func IsPrivateGoName(name string) bool { + return len(name) == 0 || strings.ToLower(name[:1]) == name[:1] +} + +// NewPublicNamer is a helper function that returns a namer that makes +// CamelCase names. See the NameStrategy struct for an explanation of the +// arguments to this constructor. +func NewPublicNamer(prependPackageNames int, ignoreWords ...string) *NameStrategy { + n := &NameStrategy{ + Join: Joiner(IC, IC), + IgnoreWords: map[string]bool{}, + PrependPackageNames: prependPackageNames, + } + for _, w := range ignoreWords { + n.IgnoreWords[w] = true + } + return n +} + +// NewPrivateNamer is a helper function that returns a namer that makes +// camelCase names. See the NameStrategy struct for an explanation of the +// arguments to this constructor. +func NewPrivateNamer(prependPackageNames int, ignoreWords ...string) *NameStrategy { + n := &NameStrategy{ + Join: Joiner(IL, IC), + IgnoreWords: map[string]bool{}, + PrependPackageNames: prependPackageNames, + } + for _, w := range ignoreWords { + n.IgnoreWords[w] = true + } + return n +} + +// NewRawNamer will return a Namer that makes a name by which you would +// directly refer to a type, optionally keeping track of the import paths +// necessary to reference the names it provides. Tracker may be nil. +// The 'pkg' is the full package name, in which the Namer is used - all +// types from that package will be referenced by just type name without +// referencing the package. +// +// For example, if the type is map[string]int, a raw namer will literally +// return "map[string]int". +// +// Or if the type, in package foo, is "type Bar struct { ... }", then the raw +// namer will return "foo.Bar" as the name of the type, and if 'tracker' was +// not nil, will record that package foo needs to be imported. +func NewRawNamer(pkg string, tracker ImportTracker) *rawNamer { + return &rawNamer{pkg: pkg, tracker: tracker} +} + +// Names is a map from Type to name, as defined by some Namer. +type Names map[*types.Type]string + +// Namer takes a type, and assigns a name. +// +// The purpose of this complexity is so that you can assign coherent +// side-by-side systems of names for the types. For example, you might want a +// public interface, a private implementation struct, and also to reference +// literally the type name. +// +// Note that it is safe to call your own Name() function recursively to find +// the names of keys, elements, etc. This is because anonymous types can't have +// cycles in their names, and named types don't require the sort of recursion +// that would be problematic. +type Namer interface { + Name(*types.Type) string +} + +// NameSystems is a map of a system name to a namer for that system. +type NameSystems map[string]Namer + +// NameStrategy is a general Namer. The easiest way to use it is to copy the +// Public/PrivateNamer variables, and modify the members you wish to change. 
+// +// The Name method produces a name for the given type, of the forms: +// Anonymous types: +// Named types: +// +// In all cases, every part of the name is run through the capitalization +// functions. +// +// The IgnoreWords map can be set if you have directory names that are +// semantically meaningless for naming purposes, e.g. "proto". +// +// Prefix and Suffix can be used to disambiguate parallel systems of type +// names. For example, if you want to generate an interface and an +// implementation, you might want to suffix one with "Interface" and the other +// with "Implementation". Another common use-- if you want to generate private +// types, and one of your source types could be "string", you can't use the +// default lowercase private namer. You'll have to add a suffix or prefix. +type NameStrategy struct { + Prefix, Suffix string + Join func(pre string, parts []string, post string) string + + // Add non-meaningful package directory names here (e.g. "proto") and + // they will be ignored. + IgnoreWords map[string]bool + + // If > 0, prepend exactly that many package directory names (or as + // many as there are). Package names listed in "IgnoreWords" will be + // ignored. + // + // For example, if Ignore words lists "proto" and type Foo is in + // pkg/server/frobbing/proto, then a value of 1 will give a type name + // of FrobbingFoo, 2 gives ServerFrobbingFoo, etc. + PrependPackageNames int + + // A cache of names thus far assigned by this namer. + Names +} + +// IC ensures the first character is uppercase. +func IC(in string) string { + if in == "" { + return in + } + return strings.ToUpper(in[:1]) + in[1:] +} + +// IL ensures the first character is lowercase. +func IL(in string) string { + if in == "" { + return in + } + return strings.ToLower(in[:1]) + in[1:] +} + +// Joiner lets you specify functions that preprocess the various components of +// a name before joining them. You can construct e.g. camelCase or CamelCase or +// any other way of joining words. (See the IC and IL convenience functions.) +func Joiner(first, others func(string) string) func(pre string, in []string, post string) string { + return func(pre string, in []string, post string) string { + tmp := []string{others(pre)} + for i := range in { + tmp = append(tmp, others(in[i])) + } + tmp = append(tmp, others(post)) + return first(strings.Join(tmp, "")) + } +} + +func (ns *NameStrategy) removePrefixAndSuffix(s string) string { + // The join function may have changed capitalization. + lowerIn := strings.ToLower(s) + lowerP := strings.ToLower(ns.Prefix) + lowerS := strings.ToLower(ns.Suffix) + b, e := 0, len(s) + if strings.HasPrefix(lowerIn, lowerP) { + b = len(ns.Prefix) + } + if strings.HasSuffix(lowerIn, lowerS) { + e -= len(ns.Suffix) + } + return s[b:e] +} + +var ( + importPathNameSanitizer = strings.NewReplacer("-", "_", ".", "") +) + +// filters out unwanted directory names and sanitizes remaining names. +func (ns *NameStrategy) filterDirs(path string) []string { + allDirs := strings.Split(path, GoSeperator) + dirs := make([]string, 0, len(allDirs)) + for _, p := range allDirs { + if ns.IgnoreWords == nil || !ns.IgnoreWords[p] { + dirs = append(dirs, importPathNameSanitizer.Replace(p)) + } + } + return dirs +} + +// See the comment on NameStrategy. 
+func (ns *NameStrategy) Name(t *types.Type) string { + if ns.Names == nil { + ns.Names = Names{} + } + if s, ok := ns.Names[t]; ok { + return s + } + + if t.Name.Package != "" { + dirs := append(ns.filterDirs(t.Name.Package), t.Name.Name) + i := ns.PrependPackageNames + 1 + dn := len(dirs) + if i > dn { + i = dn + } + name := ns.Join(ns.Prefix, dirs[dn-i:], ns.Suffix) + ns.Names[t] = name + return name + } + + // Only anonymous types remain. + var name string + switch t.Kind { + case types.Builtin: + name = ns.Join(ns.Prefix, []string{t.Name.Name}, ns.Suffix) + case types.Map: + name = ns.Join(ns.Prefix, []string{ + "Map", + ns.removePrefixAndSuffix(ns.Name(t.Key)), + "To", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Slice: + name = ns.Join(ns.Prefix, []string{ + "Slice", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Pointer: + name = ns.Join(ns.Prefix, []string{ + "Pointer", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Struct: + names := []string{"Struct"} + for _, m := range t.Members { + names = append(names, ns.removePrefixAndSuffix(ns.Name(m.Type))) + } + name = ns.Join(ns.Prefix, names, ns.Suffix) + case types.Chan: + name = ns.Join(ns.Prefix, []string{ + "Chan", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Interface: + // TODO: add to name test + names := []string{"Interface"} + for _, m := range t.Methods { + // TODO: include function signature + names = append(names, m.Name.Name) + } + name = ns.Join(ns.Prefix, names, ns.Suffix) + case types.Func: + // TODO: add to name test + parts := []string{"Func"} + for _, pt := range t.Signature.Parameters { + parts = append(parts, ns.removePrefixAndSuffix(ns.Name(pt))) + } + parts = append(parts, "Returns") + for _, rt := range t.Signature.Results { + parts = append(parts, ns.removePrefixAndSuffix(ns.Name(rt))) + } + name = ns.Join(ns.Prefix, parts, ns.Suffix) + default: + name = "unnameable_" + string(t.Kind) + } + ns.Names[t] = name + return name +} + +// ImportTracker allows a raw namer to keep track of the packages needed for +// import. You can implement yourself or use the one in the generation package. +type ImportTracker interface { + AddType(*types.Type) + LocalNameOf(packagePath string) string + PathOf(localName string) (string, bool) + ImportLines() []string +} + +type rawNamer struct { + pkg string + tracker ImportTracker + Names +} + +// Name makes a name the way you'd write it to literally refer to type t, +// making ordinary assumptions about how you've imported t's package (or using +// r.tracker to specifically track the package imports). +func (r *rawNamer) Name(t *types.Type) string { + if r.Names == nil { + r.Names = Names{} + } + if name, ok := r.Names[t]; ok { + return name + } + if t.Name.Package != "" { + var name string + if r.tracker != nil { + r.tracker.AddType(t) + if t.Name.Package == r.pkg { + name = t.Name.Name + } else { + name = r.tracker.LocalNameOf(t.Name.Package) + "." + t.Name.Name + } + } else { + if t.Name.Package == r.pkg { + name = t.Name.Name + } else { + name = filepath.Base(t.Name.Package) + "." 
+ t.Name.Name + } + } + r.Names[t] = name + return name + } + var name string + switch t.Kind { + case types.Builtin: + name = t.Name.Name + case types.Map: + name = "map[" + r.Name(t.Key) + "]" + r.Name(t.Elem) + case types.Slice: + name = "[]" + r.Name(t.Elem) + case types.Pointer: + name = "*" + r.Name(t.Elem) + case types.Struct: + elems := []string{} + for _, m := range t.Members { + elems = append(elems, m.Name+" "+r.Name(m.Type)) + } + name = "struct{" + strings.Join(elems, "; ") + "}" + case types.Chan: + // TODO: include directionality + name = "chan " + r.Name(t.Elem) + case types.Interface: + // TODO: add to name test + elems := []string{} + for _, m := range t.Methods { + // TODO: include function signature + elems = append(elems, m.Name.Name) + } + name = "interface{" + strings.Join(elems, "; ") + "}" + case types.Func: + // TODO: add to name test + params := []string{} + for _, pt := range t.Signature.Parameters { + params = append(params, r.Name(pt)) + } + results := []string{} + for _, rt := range t.Signature.Results { + results = append(results, r.Name(rt)) + } + name = "func(" + strings.Join(params, ",") + ")" + if len(results) == 1 { + name += " " + results[0] + } else if len(results) > 1 { + name += " (" + strings.Join(results, ",") + ")" + } + default: + name = "unnameable_" + string(t.Kind) + } + r.Names[t] = name + return name +} diff --git a/vendor/k8s.io/gengo/namer/order.go b/vendor/k8s.io/gengo/namer/order.go new file mode 100644 index 00000000..f86282b9 --- /dev/null +++ b/vendor/k8s.io/gengo/namer/order.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "sort" + + "k8s.io/gengo/types" +) + +// Orderer produces an ordering of types given a Namer. +type Orderer struct { + Namer +} + +// OrderUniverse assigns a name to every type in the Universe, including Types, +// Functions and Variables, and returns a list sorted by those names. +func (o *Orderer) OrderUniverse(u types.Universe) []*types.Type { + list := tList{ + namer: o.Namer, + } + for _, p := range u { + for _, t := range p.Types { + list.types = append(list.types, t) + } + for _, f := range p.Functions { + list.types = append(list.types, f) + } + for _, v := range p.Variables { + list.types = append(list.types, v) + } + } + sort.Sort(list) + return list.types +} + +// OrderTypes assigns a name to every type, and returns a list sorted by those +// names. 
+func (o *Orderer) OrderTypes(typeList []*types.Type) []*types.Type { + list := tList{ + namer: o.Namer, + types: typeList, + } + sort.Sort(list) + return list.types +} + +type tList struct { + namer Namer + types []*types.Type +} + +func (t tList) Len() int { return len(t.types) } +func (t tList) Less(i, j int) bool { return t.namer.Name(t.types[i]) < t.namer.Name(t.types[j]) } +func (t tList) Swap(i, j int) { t.types[i], t.types[j] = t.types[j], t.types[i] } diff --git a/vendor/k8s.io/gengo/namer/plural_namer.go b/vendor/k8s.io/gengo/namer/plural_namer.go new file mode 100644 index 00000000..a9a198a7 --- /dev/null +++ b/vendor/k8s.io/gengo/namer/plural_namer.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "strings" + + "k8s.io/gengo/types" +) + +var consonants = "bcdfghjklmnpqrsttvwxyz" + +type pluralNamer struct { + // key is the case-sensitive type name, value is the case-insensitive + // intended output. + exceptions map[string]string + finalize func(string) string +} + +// NewPublicPluralNamer returns a namer that returns the plural form of the input +// type's name, starting with a uppercase letter. +func NewPublicPluralNamer(exceptions map[string]string) *pluralNamer { + return &pluralNamer{exceptions, IC} +} + +// NewPrivatePluralNamer returns a namer that returns the plural form of the input +// type's name, starting with a lowercase letter. +func NewPrivatePluralNamer(exceptions map[string]string) *pluralNamer { + return &pluralNamer{exceptions, IL} +} + +// NewAllLowercasePluralNamer returns a namer that returns the plural form of the input +// type's name, with all letters in lowercase. +func NewAllLowercasePluralNamer(exceptions map[string]string) *pluralNamer { + return &pluralNamer{exceptions, strings.ToLower} +} + +// Name returns the plural form of the type's name. If the type's name is found +// in the exceptions map, the map value is returned. 
+func (r *pluralNamer) Name(t *types.Type) string { + singular := t.Name.Name + var plural string + var ok bool + if plural, ok = r.exceptions[singular]; ok { + return r.finalize(plural) + } + if len(singular) < 2 { + return r.finalize(singular) + } + + switch rune(singular[len(singular)-1]) { + case 's', 'x', 'z': + plural = esPlural(singular) + case 'y': + sl := rune(singular[len(singular)-2]) + if isConsonant(sl) { + plural = iesPlural(singular) + } else { + plural = sPlural(singular) + } + case 'h': + sl := rune(singular[len(singular)-2]) + if sl == 'c' || sl == 's' { + plural = esPlural(singular) + } else { + plural = sPlural(singular) + } + case 'e': + sl := rune(singular[len(singular)-2]) + if sl == 'f' { + plural = vesPlural(singular[:len(singular)-1]) + } else { + plural = sPlural(singular) + } + case 'f': + plural = vesPlural(singular) + default: + plural = sPlural(singular) + } + return r.finalize(plural) +} + +func iesPlural(singular string) string { + return singular[:len(singular)-1] + "ies" +} + +func vesPlural(singular string) string { + return singular[:len(singular)-1] + "ves" +} + +func esPlural(singular string) string { + return singular + "es" +} + +func sPlural(singular string) string { + return singular + "s" +} + +func isConsonant(char rune) bool { + for _, c := range consonants { + if char == c { + return true + } + } + return false +} diff --git a/vendor/k8s.io/gengo/parser/doc.go b/vendor/k8s.io/gengo/parser/doc.go new file mode 100644 index 00000000..8231b6d4 --- /dev/null +++ b/vendor/k8s.io/gengo/parser/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package parser provides code to parse go files, type-check them, extract the +// types. +package parser // import "k8s.io/gengo/parser" diff --git a/vendor/k8s.io/gengo/parser/parse.go b/vendor/k8s.io/gengo/parser/parse.go new file mode 100644 index 00000000..6a3d53b2 --- /dev/null +++ b/vendor/k8s.io/gengo/parser/parse.go @@ -0,0 +1,822 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parser + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + tc "go/types" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "sort" + "strings" + + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// This clarifies when a pkg path has been canonicalized. +type importPathString string + +// Builder lets you add all the go files in all the packages that you care +// about, then constructs the type source data. 
+type Builder struct { + context *build.Context + + // If true, include *_test.go + IncludeTestFiles bool + + // Map of package names to more canonical information about the package. + // This might hold the same value for multiple names, e.g. if someone + // referenced ./pkg/name or in the case of vendoring, which canonicalizes + // differently that what humans would type. + buildPackages map[string]*build.Package + + fset *token.FileSet + // map of package path to list of parsed files + parsed map[importPathString][]parsedFile + // map of package path to absolute path (to prevent overlap) + absPaths map[importPathString]string + + // Set by typeCheckPackage(), used by importPackage() and friends. + typeCheckedPackages map[importPathString]*tc.Package + + // Map of package path to whether the user requested it or it was from + // an import. + userRequested map[importPathString]bool + + // All comments from everywhere in every parsed file. + endLineToCommentGroup map[fileLine]*ast.CommentGroup + + // map of package to list of packages it imports. + importGraph map[importPathString]map[string]struct{} +} + +// parsedFile is for tracking files with name +type parsedFile struct { + name string + file *ast.File +} + +// key type for finding comments. +type fileLine struct { + file string + line int +} + +// New constructs a new builder. +func New() *Builder { + c := build.Default + if c.GOROOT == "" { + if p, err := exec.Command("which", "go").CombinedOutput(); err == nil { + // The returned string will have some/path/bin/go, so remove the last two elements. + c.GOROOT = filepath.Dir(filepath.Dir(strings.Trim(string(p), "\n"))) + } else { + klog.Warningf("Warning: $GOROOT not set, and unable to run `which go` to find it: %v\n", err) + } + } + // Force this to off, since we don't properly parse CGo. All symbols must + // have non-CGo equivalents. + c.CgoEnabled = false + return &Builder{ + context: &c, + buildPackages: map[string]*build.Package{}, + typeCheckedPackages: map[importPathString]*tc.Package{}, + fset: token.NewFileSet(), + parsed: map[importPathString][]parsedFile{}, + absPaths: map[importPathString]string{}, + userRequested: map[importPathString]bool{}, + endLineToCommentGroup: map[fileLine]*ast.CommentGroup{}, + importGraph: map[importPathString]map[string]struct{}{}, + } +} + +// AddBuildTags adds the specified build tags to the parse context. +func (b *Builder) AddBuildTags(tags ...string) { + b.context.BuildTags = append(b.context.BuildTags, tags...) +} + +// Get package information from the go/build package. Automatically excludes +// e.g. test files and files for other platforms-- there is quite a bit of +// logic of that nature in the build package. +func (b *Builder) importBuildPackage(dir string) (*build.Package, error) { + if buildPkg, ok := b.buildPackages[dir]; ok { + return buildPkg, nil + } + // This validates the `package foo // github.com/bar/foo` comments. + buildPkg, err := b.importWithMode(dir, build.ImportComment) + if err != nil { + if _, ok := err.(*build.NoGoError); !ok { + return nil, fmt.Errorf("unable to import %q: %v", dir, err) + } + } + if buildPkg == nil { + // Might be an empty directory. Try to just find the dir. + buildPkg, err = b.importWithMode(dir, build.FindOnly) + if err != nil { + return nil, err + } + } + + // Remember it under the user-provided name. 
+ klog.V(5).Infof("saving buildPackage %s", dir) + b.buildPackages[dir] = buildPkg + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + if dir != string(canonicalPackage) { + // Since `dir` is not the canonical name, see if we knew it under another name. + if buildPkg, ok := b.buildPackages[string(canonicalPackage)]; ok { + return buildPkg, nil + } + // Must be new, save it under the canonical name, too. + klog.V(5).Infof("saving buildPackage %s", canonicalPackage) + b.buildPackages[string(canonicalPackage)] = buildPkg + } + + return buildPkg, nil +} + +// AddFileForTest adds a file to the set, without verifying that the provided +// pkg actually exists on disk. The pkg must be of the form "canonical/pkg/path" +// and the path must be the absolute path to the file. Because this bypasses +// the normal recursive finding of package dependencies (on disk), test should +// sort their test files topologically first, so all deps are resolved by the +// time we need them. +func (b *Builder) AddFileForTest(pkg string, path string, src []byte) error { + if err := b.addFile(importPathString(pkg), path, src, true); err != nil { + return err + } + if _, err := b.typeCheckPackage(importPathString(pkg)); err != nil { + return err + } + return nil +} + +// addFile adds a file to the set. The pkgPath must be of the form +// "canonical/pkg/path" and the path must be the absolute path to the file. A +// flag indicates whether this file was user-requested or just from following +// the import graph. +func (b *Builder) addFile(pkgPath importPathString, path string, src []byte, userRequested bool) error { + for _, p := range b.parsed[pkgPath] { + if path == p.name { + klog.V(5).Infof("addFile %s %s already parsed, skipping", pkgPath, path) + return nil + } + } + klog.V(6).Infof("addFile %s %s", pkgPath, path) + p, err := parser.ParseFile(b.fset, path, src, parser.DeclarationErrors|parser.ParseComments) + if err != nil { + return err + } + + // This is redundant with addDir, but some tests call AddFileForTest, which + // call into here without calling addDir. + b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath] + + b.parsed[pkgPath] = append(b.parsed[pkgPath], parsedFile{path, p}) + for _, c := range p.Comments { + position := b.fset.Position(c.End()) + b.endLineToCommentGroup[fileLine{position.Filename, position.Line}] = c + } + + // We have to get the packages from this specific file, in case the + // user added individual files instead of entire directories. + if b.importGraph[pkgPath] == nil { + b.importGraph[pkgPath] = map[string]struct{}{} + } + for _, im := range p.Imports { + importedPath := strings.Trim(im.Path.Value, `"`) + b.importGraph[pkgPath][importedPath] = struct{}{} + } + return nil +} + +// AddDir adds an entire directory, scanning it for go files. 'dir' should have +// a single go package in it. GOPATH, GOROOT, and the location of your go +// binary (`which go`) will all be searched if dir doesn't literally resolve. +func (b *Builder) AddDir(dir string) error { + _, err := b.importPackage(dir, true) + return err +} + +// AddDirRecursive is just like AddDir, but it also recursively adds +// subdirectories; it returns an error only if the path couldn't be resolved; +// any directories recursed into without go source are ignored. +func (b *Builder) AddDirRecursive(dir string) error { + // Add the root. 
+ if _, err := b.importPackage(dir, true); err != nil { + klog.Warningf("Ignoring directory %v: %v", dir, err) + } + + // filepath.Walk includes the root dir, but we already did that, so we'll + // remove that prefix and rebuild a package import path. + prefix := b.buildPackages[dir].Dir + fn := func(filePath string, info os.FileInfo, err error) error { + if info != nil && info.IsDir() { + rel := filepath.ToSlash(strings.TrimPrefix(filePath, prefix)) + if rel != "" { + // Make a pkg path. + pkg := path.Join(string(canonicalizeImportPath(b.buildPackages[dir].ImportPath)), rel) + + // Add it. + if _, err := b.importPackage(pkg, true); err != nil { + klog.Warningf("Ignoring child directory %v: %v", pkg, err) + } + } + } + return nil + } + if err := filepath.Walk(b.buildPackages[dir].Dir, fn); err != nil { + return err + } + return nil +} + +// AddDirTo adds an entire directory to a given Universe. Unlike AddDir, this +// processes the package immediately, which makes it safe to use from within a +// generator (rather than just at init time. 'dir' must be a single go package. +// GOPATH, GOROOT, and the location of your go binary (`which go`) will all be +// searched if dir doesn't literally resolve. +// Deprecated. Please use AddDirectoryTo. +func (b *Builder) AddDirTo(dir string, u *types.Universe) error { + // We want all types from this package, as if they were directly added + // by the user. They WERE added by the user, in effect. + if _, err := b.importPackage(dir, true); err != nil { + return err + } + return b.findTypesIn(canonicalizeImportPath(b.buildPackages[dir].ImportPath), u) +} + +// AddDirectoryTo adds an entire directory to a given Universe. Unlike AddDir, +// this processes the package immediately, which makes it safe to use from +// within a generator (rather than just at init time. 'dir' must be a single go +// package. GOPATH, GOROOT, and the location of your go binary (`which go`) +// will all be searched if dir doesn't literally resolve. +func (b *Builder) AddDirectoryTo(dir string, u *types.Universe) (*types.Package, error) { + // We want all types from this package, as if they were directly added + // by the user. They WERE added by the user, in effect. + if _, err := b.importPackage(dir, true); err != nil { + return nil, err + } + path := canonicalizeImportPath(b.buildPackages[dir].ImportPath) + if err := b.findTypesIn(path, u); err != nil { + return nil, err + } + return u.Package(string(path)), nil +} + +// The implementation of AddDir. A flag indicates whether this directory was +// user-requested or just from following the import graph. +func (b *Builder) addDir(dir string, userRequested bool) error { + klog.V(5).Infof("addDir %s", dir) + buildPkg, err := b.importBuildPackage(dir) + if err != nil { + return err + } + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + pkgPath := canonicalPackage + if dir != string(canonicalPackage) { + klog.V(5).Infof("addDir %s, canonical path is %s", dir, pkgPath) + } + + // Sanity check the pkg dir has not changed. + if prev, found := b.absPaths[pkgPath]; found { + if buildPkg.Dir != prev { + return fmt.Errorf("package %q (%s) previously resolved to %s", pkgPath, buildPkg.Dir, prev) + } + } else { + b.absPaths[pkgPath] = buildPkg.Dir + } + + files := []string{} + files = append(files, buildPkg.GoFiles...) + if b.IncludeTestFiles { + files = append(files, buildPkg.TestGoFiles...) 
+ } + + for _, file := range files { + if !strings.HasSuffix(file, ".go") { + continue + } + absPath := filepath.Join(buildPkg.Dir, file) + data, err := ioutil.ReadFile(absPath) + if err != nil { + return fmt.Errorf("while loading %q: %v", absPath, err) + } + err = b.addFile(pkgPath, absPath, data, userRequested) + if err != nil { + return fmt.Errorf("while parsing %q: %v", absPath, err) + } + } + return nil +} + +// importPackage is a function that will be called by the type check package when it +// needs to import a go package. 'path' is the import path. +func (b *Builder) importPackage(dir string, userRequested bool) (*tc.Package, error) { + klog.V(5).Infof("importPackage %s", dir) + var pkgPath = importPathString(dir) + + // Get the canonical path if we can. + if buildPkg := b.buildPackages[dir]; buildPkg != nil { + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage) + pkgPath = canonicalPackage + } + + // If we have not seen this before, process it now. + ignoreError := false + if _, found := b.parsed[pkgPath]; !found { + // Ignore errors in paths that we're importing solely because + // they're referenced by other packages. + ignoreError = true + + // Add it. + if err := b.addDir(dir, userRequested); err != nil { + return nil, err + } + + // Get the canonical path now that it has been added. + if buildPkg := b.buildPackages[dir]; buildPkg != nil { + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage) + pkgPath = canonicalPackage + } + } + + // If it was previously known, just check that the user-requestedness hasn't + // changed. + b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath] + + // Run the type checker. We may end up doing this to pkgs that are already + // done, or are in the queue to be done later, but it will short-circuit, + // and we can't miss pkgs that are only depended on. + pkg, err := b.typeCheckPackage(pkgPath) + if err != nil { + switch { + case ignoreError && pkg != nil: + klog.V(2).Infof("type checking encountered some issues in %q, but ignoring.\n", pkgPath) + case !ignoreError && pkg != nil: + klog.V(2).Infof("type checking encountered some errors in %q\n", pkgPath) + return nil, err + default: + return nil, err + } + } + + return pkg, nil +} + +type importAdapter struct { + b *Builder +} + +func (a importAdapter) Import(path string) (*tc.Package, error) { + return a.b.importPackage(path, false) +} + +// typeCheckPackage will attempt to return the package even if there are some +// errors, so you may check whether the package is nil or not even if you get +// an error. +func (b *Builder) typeCheckPackage(pkgPath importPathString) (*tc.Package, error) { + klog.V(5).Infof("typeCheckPackage %s", pkgPath) + if pkg, ok := b.typeCheckedPackages[pkgPath]; ok { + if pkg != nil { + klog.V(6).Infof("typeCheckPackage %s already done", pkgPath) + return pkg, nil + } + // We store a nil right before starting work on a package. So + // if we get here and it's present and nil, that means there's + // another invocation of this function on the call stack + // already processing this package. 
+ return nil, fmt.Errorf("circular dependency for %q", pkgPath) + } + parsedFiles, ok := b.parsed[pkgPath] + if !ok { + return nil, fmt.Errorf("No files for pkg %q", pkgPath) + } + files := make([]*ast.File, len(parsedFiles)) + for i := range parsedFiles { + files[i] = parsedFiles[i].file + } + b.typeCheckedPackages[pkgPath] = nil + c := tc.Config{ + IgnoreFuncBodies: true, + // Note that importAdapter can call b.importPackage which calls this + // method. So there can't be cycles in the import graph. + Importer: importAdapter{b}, + Error: func(err error) { + klog.V(2).Infof("type checker: %v\n", err) + }, + } + pkg, err := c.Check(string(pkgPath), b.fset, files, nil) + b.typeCheckedPackages[pkgPath] = pkg // record the result whether or not there was an error + return pkg, err +} + +// FindPackages fetches a list of the user-imported packages. +// Note that you need to call b.FindTypes() first. +func (b *Builder) FindPackages() []string { + // Iterate packages in a predictable order. + pkgPaths := []string{} + for k := range b.typeCheckedPackages { + pkgPaths = append(pkgPaths, string(k)) + } + sort.Strings(pkgPaths) + + result := []string{} + for _, pkgPath := range pkgPaths { + if b.userRequested[importPathString(pkgPath)] { + // Since walkType is recursive, all types that are in packages that + // were directly mentioned will be included. We don't need to + // include all types in all transitive packages, though. + result = append(result, pkgPath) + } + } + return result +} + +// FindTypes finalizes the package imports, and searches through all the +// packages for types. +func (b *Builder) FindTypes() (types.Universe, error) { + // Take a snapshot of pkgs to iterate, since this will recursively mutate + // b.parsed. Iterate in a predictable order. + pkgPaths := []string{} + for pkgPath := range b.parsed { + pkgPaths = append(pkgPaths, string(pkgPath)) + } + sort.Strings(pkgPaths) + + u := types.Universe{} + for _, pkgPath := range pkgPaths { + if err := b.findTypesIn(importPathString(pkgPath), &u); err != nil { + return nil, err + } + } + return u, nil +} + +// findTypesIn finalizes the package import and searches through the package +// for types. +func (b *Builder) findTypesIn(pkgPath importPathString, u *types.Universe) error { + klog.V(5).Infof("findTypesIn %s", pkgPath) + pkg := b.typeCheckedPackages[pkgPath] + if pkg == nil { + return fmt.Errorf("findTypesIn(%s): package is not known", pkgPath) + } + if !b.userRequested[pkgPath] { + // Since walkType is recursive, all types that the + // packages they asked for depend on will be included. + // But we don't need to include all types in all + // *packages* they depend on. + klog.V(5).Infof("findTypesIn %s: package is not user requested", pkgPath) + return nil + } + + // We're keeping this package. This call will create the record. + u.Package(string(pkgPath)).Name = pkg.Name() + u.Package(string(pkgPath)).Path = pkg.Path() + u.Package(string(pkgPath)).SourcePath = b.absPaths[pkgPath] + + for _, f := range b.parsed[pkgPath] { + if _, fileName := filepath.Split(f.name); fileName == "doc.go" { + tp := u.Package(string(pkgPath)) + // findTypesIn might be called multiple times. Clean up tp.Comments + // to avoid repeatedly fill same comments to it. + tp.Comments = []string{} + for i := range f.file.Comments { + tp.Comments = append(tp.Comments, splitLines(f.file.Comments[i].Text())...) 
+ } + if f.file.Doc != nil { + tp.DocComments = splitLines(f.file.Doc.Text()) + } + } + } + + s := pkg.Scope() + for _, n := range s.Names() { + obj := s.Lookup(n) + tn, ok := obj.(*tc.TypeName) + if ok { + t := b.walkType(*u, nil, tn.Type()) + c1 := b.priorCommentLines(obj.Pos(), 1) + // c1.Text() is safe if c1 is nil + t.CommentLines = splitLines(c1.Text()) + if c1 == nil { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(obj.Pos(), 2).Text()) + } else { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(c1.List[0].Slash, 2).Text()) + } + } + tf, ok := obj.(*tc.Func) + // We only care about functions, not concrete/abstract methods. + if ok && tf.Type() != nil && tf.Type().(*tc.Signature).Recv() == nil { + t := b.addFunction(*u, nil, tf) + c1 := b.priorCommentLines(obj.Pos(), 1) + // c1.Text() is safe if c1 is nil + t.CommentLines = splitLines(c1.Text()) + if c1 == nil { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(obj.Pos(), 2).Text()) + } else { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(c1.List[0].Slash, 2).Text()) + } + } + tv, ok := obj.(*tc.Var) + if ok && !tv.IsField() { + b.addVariable(*u, nil, tv) + } + } + + importedPkgs := []string{} + for k := range b.importGraph[pkgPath] { + importedPkgs = append(importedPkgs, string(k)) + } + sort.Strings(importedPkgs) + for _, p := range importedPkgs { + u.AddImports(string(pkgPath), p) + } + return nil +} + +func (b *Builder) importWithMode(dir string, mode build.ImportMode) (*build.Package, error) { + // This is a bit of a hack. The srcDir argument to Import() should + // properly be the dir of the file which depends on the package to be + // imported, so that vendoring can work properly and local paths can + // resolve. We assume that there is only one level of vendoring, and that + // the CWD is inside the GOPATH, so this should be safe. Nobody should be + // using local (relative) paths except on the CLI, so CWD is also + // sufficient. + cwd, err := os.Getwd() + if err != nil { + return nil, fmt.Errorf("unable to get current directory: %v", err) + } + buildPkg, err := b.context.Import(dir, cwd, mode) + if err != nil { + return nil, err + } + return buildPkg, nil +} + +// if there's a comment on the line `lines` before pos, return its text, otherwise "". +func (b *Builder) priorCommentLines(pos token.Pos, lines int) *ast.CommentGroup { + position := b.fset.Position(pos) + key := fileLine{position.Filename, position.Line - lines} + return b.endLineToCommentGroup[key] +} + +func splitLines(str string) []string { + return strings.Split(strings.TrimRight(str, "\n"), "\n") +} + +func tcFuncNameToName(in string) types.Name { + name := strings.TrimPrefix(in, "func ") + nameParts := strings.Split(name, "(") + return tcNameToName(nameParts[0]) +} + +func tcVarNameToName(in string) types.Name { + nameParts := strings.Split(in, " ") + // nameParts[0] is "var". + // nameParts[2:] is the type of the variable, we ignore it for now. + return tcNameToName(nameParts[1]) +} + +func tcNameToName(in string) types.Name { + // Detect anonymous type names. (These may have '.' characters because + // embedded types may have packages, so we detect them specially.) 
+ if strings.HasPrefix(in, "struct{") || + strings.HasPrefix(in, "<-chan") || + strings.HasPrefix(in, "chan<-") || + strings.HasPrefix(in, "chan ") || + strings.HasPrefix(in, "func(") || + strings.HasPrefix(in, "*") || + strings.HasPrefix(in, "map[") || + strings.HasPrefix(in, "[") { + return types.Name{Name: in} + } + + // Otherwise, if there are '.' characters present, the name has a + // package path in front. + nameParts := strings.Split(in, ".") + name := types.Name{Name: in} + if n := len(nameParts); n >= 2 { + // The final "." is the name of the type--previous ones must + // have been in the package path. + name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1] + } + return name +} + +func (b *Builder) convertSignature(u types.Universe, t *tc.Signature) *types.Signature { + signature := &types.Signature{} + for i := 0; i < t.Params().Len(); i++ { + signature.Parameters = append(signature.Parameters, b.walkType(u, nil, t.Params().At(i).Type())) + } + for i := 0; i < t.Results().Len(); i++ { + signature.Results = append(signature.Results, b.walkType(u, nil, t.Results().At(i).Type())) + } + if r := t.Recv(); r != nil { + signature.Receiver = b.walkType(u, nil, r.Type()) + } + signature.Variadic = t.Variadic() + return signature +} + +// walkType adds the type, and any necessary child types. +func (b *Builder) walkType(u types.Universe, useName *types.Name, in tc.Type) *types.Type { + // Most of the cases are underlying types of the named type. + name := tcNameToName(in.String()) + if useName != nil { + name = *useName + } + + switch t := in.(type) { + case *tc.Struct: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Struct + for i := 0; i < t.NumFields(); i++ { + f := t.Field(i) + m := types.Member{ + Name: f.Name(), + Embedded: f.Anonymous(), + Tags: t.Tag(i), + Type: b.walkType(u, nil, f.Type()), + CommentLines: splitLines(b.priorCommentLines(f.Pos(), 1).Text()), + } + out.Members = append(out.Members, m) + } + return out + case *tc.Map: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Map + out.Elem = b.walkType(u, nil, t.Elem()) + out.Key = b.walkType(u, nil, t.Key()) + return out + case *tc.Pointer: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Pointer + out.Elem = b.walkType(u, nil, t.Elem()) + return out + case *tc.Slice: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Slice + out.Elem = b.walkType(u, nil, t.Elem()) + return out + case *tc.Array: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Array + out.Elem = b.walkType(u, nil, t.Elem()) + // TODO: need to store array length, otherwise raw type name + // cannot be properly written. + return out + case *tc.Chan: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Chan + out.Elem = b.walkType(u, nil, t.Elem()) + // TODO: need to store direction, otherwise raw type name + // cannot be properly written. 
+ return out + case *tc.Basic: + out := u.Type(types.Name{ + Package: "", + Name: t.Name(), + }) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Unsupported + return out + case *tc.Signature: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Func + out.Signature = b.convertSignature(u, t) + return out + case *tc.Interface: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Interface + t.Complete() + for i := 0; i < t.NumMethods(); i++ { + if out.Methods == nil { + out.Methods = map[string]*types.Type{} + } + out.Methods[t.Method(i).Name()] = b.walkType(u, nil, t.Method(i).Type()) + } + return out + case *tc.Named: + var out *types.Type + switch t.Underlying().(type) { + case *tc.Named, *tc.Basic, *tc.Map, *tc.Slice: + name := tcNameToName(t.String()) + out = u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Alias + out.Underlying = b.walkType(u, nil, t.Underlying()) + default: + // tc package makes everything "named" with an + // underlying anonymous type--we remove that annoying + // "feature" for users. This flattens those types + // together. + name := tcNameToName(t.String()) + if out := u.Type(name); out.Kind != types.Unknown { + return out // short circuit if we've already made this. + } + out = b.walkType(u, &name, t.Underlying()) + } + // If the underlying type didn't already add methods, add them. + // (Interface types will have already added methods.) + if len(out.Methods) == 0 { + for i := 0; i < t.NumMethods(); i++ { + if out.Methods == nil { + out.Methods = map[string]*types.Type{} + } + out.Methods[t.Method(i).Name()] = b.walkType(u, nil, t.Method(i).Type()) + } + } + return out + default: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Unsupported + klog.Warningf("Making unsupported type entry %q for: %#v\n", out, t) + return out + } +} + +func (b *Builder) addFunction(u types.Universe, useName *types.Name, in *tc.Func) *types.Type { + name := tcFuncNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Function(name) + out.Kind = types.DeclarationOf + out.Underlying = b.walkType(u, nil, in.Type()) + return out +} + +func (b *Builder) addVariable(u types.Universe, useName *types.Name, in *tc.Var) *types.Type { + name := tcVarNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Variable(name) + out.Kind = types.DeclarationOf + out.Underlying = b.walkType(u, nil, in.Type()) + return out +} + +// canonicalizeImportPath takes an import path and returns the actual package. +// It doesn't support nested vendoring. +func canonicalizeImportPath(importPath string) importPathString { + if !strings.Contains(importPath, "/vendor/") { + return importPathString(importPath) + } + + return importPathString(importPath[strings.Index(importPath, "/vendor/")+len("/vendor/"):]) +} diff --git a/vendor/k8s.io/gengo/types/comments.go b/vendor/k8s.io/gengo/types/comments.go new file mode 100644 index 00000000..8150c383 --- /dev/null +++ b/vendor/k8s.io/gengo/types/comments.go @@ -0,0 +1,82 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types contains go type information, packaged in a way that makes +// auto-generation convenient, whether by template or straight go functions. +package types + +import ( + "fmt" + "strings" +) + +// ExtractCommentTags parses comments for lines of the form: +// +// 'marker' + "key=value". +// +// Values are optional; "" is the default. A tag can be specified more than +// one time and all values are returned. If the resulting map has an entry for +// a key, the value (a slice) is guaranteed to have at least 1 element. +// +// Example: if you pass "+" for 'marker', and the following lines are in +// the comments: +// +foo=value1 +// +bar +// +foo=value2 +// +baz="qux" +// Then this function will return: +// map[string][]string{"foo":{"value1, "value2"}, "bar": {""}, "baz": {"qux"}} +func ExtractCommentTags(marker string, lines []string) map[string][]string { + out := map[string][]string{} + for _, line := range lines { + line = strings.Trim(line, " ") + if len(line) == 0 { + continue + } + if !strings.HasPrefix(line, marker) { + continue + } + // TODO: we could support multiple values per key if we split on spaces + kv := strings.SplitN(line[len(marker):], "=", 2) + if len(kv) == 2 { + out[kv[0]] = append(out[kv[0]], kv[1]) + } else if len(kv) == 1 { + out[kv[0]] = append(out[kv[0]], "") + } + } + return out +} + +// ExtractSingleBoolCommentTag parses comments for lines of the form: +// +// 'marker' + "key=value1" +// +// If the tag is not found, the default value is returned. Values are asserted +// to be boolean ("true" or "false"), and any other value will cause an error +// to be returned. If the key has multiple values, the first one will be used. +func ExtractSingleBoolCommentTag(marker string, key string, defaultVal bool, lines []string) (bool, error) { + values := ExtractCommentTags(marker, lines)[key] + if values == nil { + return defaultVal, nil + } + if values[0] == "true" { + return true, nil + } + if values[0] == "false" { + return false, nil + } + return false, fmt.Errorf("tag value for %q is not boolean: %q", key, values[0]) +} diff --git a/vendor/k8s.io/gengo/types/doc.go b/vendor/k8s.io/gengo/types/doc.go new file mode 100644 index 00000000..74a969a7 --- /dev/null +++ b/vendor/k8s.io/gengo/types/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types contains go type information, packaged in a way that makes +// auto-generation convenient, whether by template or straight go functions. 
+package types // import "k8s.io/gengo/types" diff --git a/vendor/k8s.io/gengo/types/flatten.go b/vendor/k8s.io/gengo/types/flatten.go new file mode 100644 index 00000000..585014e8 --- /dev/null +++ b/vendor/k8s.io/gengo/types/flatten.go @@ -0,0 +1,57 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// FlattenMembers recursively takes any embedded members and puts them in the +// top level, correctly hiding them if the top level hides them. There must not +// be a cycle-- that implies infinite members. +// +// This is useful for e.g. computing all the valid keys in a json struct, +// properly considering any configuration of embedded structs. +func FlattenMembers(m []Member) []Member { + embedded := []Member{} + normal := []Member{} + type nameInfo struct { + top bool + i int + } + names := map[string]nameInfo{} + for i := range m { + if m[i].Embedded && m[i].Type.Kind == Struct { + embedded = append(embedded, m[i]) + } else { + normal = append(normal, m[i]) + names[m[i].Name] = nameInfo{true, len(normal) - 1} + } + } + for i := range embedded { + for _, e := range FlattenMembers(embedded[i].Type.Members) { + if info, found := names[e.Name]; found { + if info.top { + continue + } + if n := normal[info.i]; n.Name == e.Name && n.Type == e.Type { + continue + } + panic("conflicting members") + } + normal = append(normal, e) + names[e.Name] = nameInfo{false, len(normal) - 1} + } + } + return normal +} diff --git a/vendor/k8s.io/gengo/types/types.go b/vendor/k8s.io/gengo/types/types.go new file mode 100644 index 00000000..ec25248e --- /dev/null +++ b/vendor/k8s.io/gengo/types/types.go @@ -0,0 +1,499 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import "strings" + +// Ref makes a reference to the given type. It can only be used for e.g. +// passing to namers. +func Ref(packageName, typeName string) *Type { + return &Type{Name: Name{ + Name: typeName, + Package: packageName, + }} +} + +// A type name may have a package qualifier. +type Name struct { + // Empty if embedded or builtin. This is the package path unless Path is specified. + Package string + // The type name. + Name string + // An optional location of the type definition for languages that can have disjoint + // packages and paths. + Path string +} + +// String returns the name formatted as a string. +func (n Name) String() string { + if n.Package == "" { + return n.Name + } + return n.Package + "." 
+ n.Name +} + +// ParseFullyQualifiedName parses a name like k8s.io/kubernetes/pkg/api.Pod into a Name. +func ParseFullyQualifiedName(fqn string) Name { + cs := strings.Split(fqn, ".") + pkg := "" + if len(cs) > 1 { + pkg = strings.Join(cs[0:len(cs)-1], ".") + } + return Name{ + Name: cs[len(cs)-1], + Package: pkg, + } +} + +// The possible classes of types. +type Kind string + +const ( + // Builtin is a primitive, like bool, string, int. + Builtin Kind = "Builtin" + Struct Kind = "Struct" + Map Kind = "Map" + Slice Kind = "Slice" + Pointer Kind = "Pointer" + + // Alias is an alias of another type, e.g. in: + // type Foo string + // type Bar Foo + // Bar is an alias of Foo. + // + // In the real go type system, Foo is a "Named" string; but to simplify + // generation, this type system will just say that Foo *is* a builtin. + // We then need "Alias" as a way for us to say that Bar *is* a Foo. + Alias Kind = "Alias" + + // Interface is any type that could have differing types at run time. + Interface Kind = "Interface" + + // The remaining types are included for completeness, but are not well + // supported. + Array Kind = "Array" // Array is just like slice, but has a fixed length. + Chan Kind = "Chan" + Func Kind = "Func" + + // DeclarationOf is different from other Kinds; it indicates that instead of + // representing an actual Type, the type is a declaration of an instance of + // a type. E.g., a top-level function, variable, or constant. See the + // comment for Type.Name for more detail. + DeclarationOf Kind = "DeclarationOf" + Unknown Kind = "" + Unsupported Kind = "Unsupported" + + // Protobuf is protobuf type. + Protobuf Kind = "Protobuf" +) + +// Package holds package-level information. +// Fields are public, as everything in this package, to enable consumption by +// templates (for example). But it is strongly encouraged for code to build by +// using the provided functions. +type Package struct { + // Canonical name of this package-- its path. + Path string + + // The location this package was loaded from + SourcePath string + + // Short name of this package; the name that appears in the + // 'package x' line. + Name string + + // The comment right above the package declaration in doc.go, if any. + DocComments []string + + // All comments from doc.go, if any. + // TODO: remove Comments and use DocComments everywhere. + Comments []string + + // Types within this package, indexed by their name (*not* including + // package name). + Types map[string]*Type + + // Functions within this package, indexed by their name (*not* including + // package name). + Functions map[string]*Type + + // Global variables within this package, indexed by their name (*not* including + // package name). + Variables map[string]*Type + + // Packages imported by this package, indexed by (canonicalized) + // package path. + Imports map[string]*Package +} + +// Has returns true if the given name references a type known to this package. +func (p *Package) Has(name string) bool { + _, has := p.Types[name] + return has +} + +// Type gets the given Type in this Package. If the Type is not already +// defined, this will add it and return the new Type value. The caller is +// expected to finish initialization. +func (p *Package) Type(typeName string) *Type { + if t, ok := p.Types[typeName]; ok { + return t + } + if p.Path == "" { + // Import the standard builtin types! 
+ if t, ok := builtins.Types[typeName]; ok { + p.Types[typeName] = t + return t + } + } + t := &Type{Name: Name{Package: p.Path, Name: typeName}} + p.Types[typeName] = t + return t +} + +// Function gets the given function Type in this Package. If the function is +// not already defined, this will add it. If a function is added, it's the +// caller's responsibility to finish construction of the function by setting +// Underlying to the correct type. +func (p *Package) Function(funcName string) *Type { + if t, ok := p.Functions[funcName]; ok { + return t + } + t := &Type{Name: Name{Package: p.Path, Name: funcName}} + t.Kind = DeclarationOf + p.Functions[funcName] = t + return t +} + +// Variable gets the given variable Type in this Package. If the variable is +// not already defined, this will add it. If a variable is added, it's the caller's +// responsibility to finish construction of the variable by setting Underlying +// to the correct type. +func (p *Package) Variable(varName string) *Type { + if t, ok := p.Variables[varName]; ok { + return t + } + t := &Type{Name: Name{Package: p.Path, Name: varName}} + t.Kind = DeclarationOf + p.Variables[varName] = t + return t +} + +// HasImport returns true if p imports packageName. Package names include the +// package directory. +func (p *Package) HasImport(packageName string) bool { + _, has := p.Imports[packageName] + return has +} + +// Universe is a map of all packages. The key is the package name, but you +// should use Package(), Type(), Function(), or Variable() instead of direct +// access. +type Universe map[string]*Package + +// Type returns the canonical type for the given fully-qualified name. Builtin +// types will always be found, even if they haven't been explicitly added to +// the map. If a non-existing type is requested, this will create (a marker for) +// it. +func (u Universe) Type(n Name) *Type { + return u.Package(n.Package).Type(n.Name) +} + +// Function returns the canonical function for the given fully-qualified name. +// If a non-existing function is requested, this will create (a marker for) it. +// If a marker is created, it's the caller's responsibility to finish +// construction of the function by setting Underlying to the correct type. +func (u Universe) Function(n Name) *Type { + return u.Package(n.Package).Function(n.Name) +} + +// Variable returns the canonical variable for the given fully-qualified name. +// If a non-existing variable is requested, this will create (a marker for) it. +// If a marker is created, it's the caller's responsibility to finish +// construction of the variable by setting Underlying to the correct type. +func (u Universe) Variable(n Name) *Type { + return u.Package(n.Package).Variable(n.Name) +} + +// AddImports registers import lines for packageName. May be called multiple times. +// You are responsible for canonicalizing all package paths. +func (u Universe) AddImports(packagePath string, importPaths ...string) { + p := u.Package(packagePath) + for _, i := range importPaths { + p.Imports[i] = u.Package(i) + } +} + +// Package returns the Package for the given path. +// If a non-existing package is requested, this will create (a marker for) it. +// If a marker is created, it's the caller's responsibility to finish +// construction of the package. 
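A small illustrative fragment (editorial sketch, not part of the vendored file; the package paths are placeholders, and it assumes `fmt` and `k8s.io/gengo/types` are imported) showing how the Universe behaves as a lazily populated index, using only the accessors defined in this file:

u := types.Universe{}

// Requesting a type that is not yet known creates a marker entry whose Kind
// is Unknown; a parser or generator is expected to fill it in later.
pod := u.Type(types.Name{Package: "k8s.io/api/core/v1", Name: "Pod"})
fmt.Println(pod.Kind == types.Unknown) // true

// Builtins live under the empty package path and are pre-registered.
str := u.Type(types.Name{Name: "string"})
fmt.Println(str.Kind == types.Builtin) // true

// Ref builds a reference to a type by name, e.g. for passing to namers.
ref := types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "ObjectMeta")
fmt.Println(ref.Name.String()) // "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"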
+func (u Universe) Package(packagePath string) *Package { + if p, ok := u[packagePath]; ok { + return p + } + p := &Package{ + Path: packagePath, + Types: map[string]*Type{}, + Functions: map[string]*Type{}, + Variables: map[string]*Type{}, + Imports: map[string]*Package{}, + } + u[packagePath] = p + return p +} + +// Type represents a subset of possible go types. +type Type struct { + // There are two general categories of types, those explicitly named + // and those anonymous. Named ones will have a non-empty package in the + // name field. + // + // An exception: If Kind == DeclarationOf, then this name is the name of a + // top-level function, variable, or const, and the type can be found in Underlying. + // We do this to allow the naming system to work against these objects, even + // though they aren't strictly speaking types. + Name Name + + // The general kind of this type. + Kind Kind + + // If there are comment lines immediately before the type definition, + // they will be recorded here. + CommentLines []string + + // If there are comment lines preceding the `CommentLines`, they will be + // recorded here. There are two cases: + // --- + // SecondClosestCommentLines + // a blank line + // CommentLines + // type definition + // --- + // + // or + // --- + // SecondClosestCommentLines + // a blank line + // type definition + // --- + SecondClosestCommentLines []string + + // If Kind == Struct + Members []Member + + // If Kind == Map, Slice, Pointer, or Chan + Elem *Type + + // If Kind == Map, this is the map's key type. + Key *Type + + // If Kind == Alias, this is the underlying type. + // If Kind == DeclarationOf, this is the type of the declaration. + Underlying *Type + + // If Kind == Interface, this is the set of all required functions. + // Otherwise, if this is a named type, this is the list of methods that + // type has. (All elements will have Kind=="Func") + Methods map[string]*Type + + // If Kind == func, this is the signature of the function. + Signature *Signature + + // TODO: Add: + // * channel direction + // * array length +} + +// String returns the name of the type. +func (t *Type) String() string { + return t.Name.String() +} + +// IsPrimitive returns whether the type is a built-in type or is an alias to a +// built-in type. For example: strings and aliases of strings are primitives, +// structs are not. +func (t *Type) IsPrimitive() bool { + if t.Kind == Builtin || (t.Kind == Alias && t.Underlying.Kind == Builtin) { + return true + } + return false +} + +// IsAssignable returns whether the type is deep-assignable. For example, +// slices and maps and pointers are shallow copies, but ints and strings are +// complete. +func (t *Type) IsAssignable() bool { + if t.IsPrimitive() { + return true + } + if t.Kind == Struct { + for _, m := range t.Members { + if !m.Type.IsAssignable() { + return false + } + } + return true + } + return false +} + +// IsAnonymousStruct returns true if the type is an anonymous struct or an alias +// to an anonymous struct. +func (t *Type) IsAnonymousStruct() bool { + return (t.Kind == Struct && t.Name.Name == "struct{}") || (t.Kind == Alias && t.Underlying.IsAnonymousStruct()) +} + +// A single struct member +type Member struct { + // The name of the member. + Name string + + // If the member is embedded (anonymous) this will be true, and the + // Name will be the type name. + Embedded bool + + // If there are comment lines immediately before the member in the type + // definition, they will be recorded here. 
+ CommentLines []string + + // If there are tags along with this member, they will be saved here. + Tags string + + // The type of this member. + Type *Type +} + +// String returns the name and type of the member. +func (m Member) String() string { + return m.Name + " " + m.Type.String() +} + +// Signature is a function's signature. +type Signature struct { + // TODO: store the parameter names, not just types. + + // If a method of some type, this is the type it's a member of. + Receiver *Type + Parameters []*Type + Results []*Type + + // True if the last in parameter is of the form ...T. + Variadic bool + + // If there are comment lines immediately before this + // signature/method/function declaration, they will be recorded here. + CommentLines []string +} + +// Built in types. +var ( + String = &Type{ + Name: Name{Name: "string"}, + Kind: Builtin, + } + Int64 = &Type{ + Name: Name{Name: "int64"}, + Kind: Builtin, + } + Int32 = &Type{ + Name: Name{Name: "int32"}, + Kind: Builtin, + } + Int16 = &Type{ + Name: Name{Name: "int16"}, + Kind: Builtin, + } + Int = &Type{ + Name: Name{Name: "int"}, + Kind: Builtin, + } + Uint64 = &Type{ + Name: Name{Name: "uint64"}, + Kind: Builtin, + } + Uint32 = &Type{ + Name: Name{Name: "uint32"}, + Kind: Builtin, + } + Uint16 = &Type{ + Name: Name{Name: "uint16"}, + Kind: Builtin, + } + Uint = &Type{ + Name: Name{Name: "uint"}, + Kind: Builtin, + } + Uintptr = &Type{ + Name: Name{Name: "uintptr"}, + Kind: Builtin, + } + Float64 = &Type{ + Name: Name{Name: "float64"}, + Kind: Builtin, + } + Float32 = &Type{ + Name: Name{Name: "float32"}, + Kind: Builtin, + } + Float = &Type{ + Name: Name{Name: "float"}, + Kind: Builtin, + } + Bool = &Type{ + Name: Name{Name: "bool"}, + Kind: Builtin, + } + Byte = &Type{ + Name: Name{Name: "byte"}, + Kind: Builtin, + } + + builtins = &Package{ + Types: map[string]*Type{ + "bool": Bool, + "string": String, + "int": Int, + "int64": Int64, + "int32": Int32, + "int16": Int16, + "int8": Byte, + "uint": Uint, + "uint64": Uint64, + "uint32": Uint32, + "uint16": Uint16, + "uint8": Byte, + "uintptr": Uintptr, + "byte": Byte, + "float": Float, + "float64": Float64, + "float32": Float32, + }, + Imports: map[string]*Package{}, + Path: "", + Name: "", + } +) + +func IsInteger(t *Type) bool { + switch t { + case Int, Int64, Int32, Int16, Uint, Uint64, Uint32, Uint16, Byte: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml index 0f508dae..5677664c 100644 --- a/vendor/k8s.io/klog/.travis.yml +++ b/vendor/k8s.io/klog/.travis.yml @@ -5,11 +5,12 @@ go: - 1.9.x - 1.10.x - 1.11.x + - 1.12.x script: - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d .) - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . + - go tool vet . || go vet . - go test -v -race ./... install: - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index bee306f3..841468b4 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -31,7 +31,7 @@ How to use klog - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. 
(See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 00000000..3877d854 --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 00000000..fb64d277 --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index b033996c..2712ce0a 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -20,17 +20,17 @@ // // Basic examples: // -// glog.Info("Prepare to repel boarders") +// klog.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if klog.V(2) { +// klog.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. @@ -142,7 +142,7 @@ func (s *severity) Set(value string) error { if v, ok := severityByName(value); ok { threshold = v } else { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -226,7 +226,7 @@ func (l *Level) Get() interface{} { // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -294,7 +294,7 @@ func (m *moduleSpec) Set(value string) error { return errVmoduleSyntax } pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) + v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } @@ -396,40 +396,23 @@ type flushSyncWriter interface { io.Writer } +// init sets up the defaults and runs flushDaemon. func init() { - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. 
logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false go logging.flushDaemon() } -var initDefaultsOnce sync.Once - // InitFlags is for explicitly initializing the flags. func InitFlags(flagset *flag.FlagSet) { - - // Initialize defaults. - initDefaultsOnce.Do(func() { - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - // TODO: The default value of toStderr should be false. - // Ideally, toStderr can be deprecated. - // If --log-file is set, only log to the dedicated log-file. - // If --alsoToStderr is true, whether or not --log-file is set, we will - // log to stderr. - // Since kubernetes/kubernetes are currently using klog with - // default --toStderr to be true, we can't change this default value - // directly. - // As a compromise, when --log-file is set, the toStdErr is reset to - // be false. e.g. See function `IsSingleMode`. - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.skipLogHeaders = false - }) - if flagset == nil { flagset = flag.CommandLine } @@ -442,6 +425,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") @@ -477,8 +461,6 @@ type loggingT struct { mu sync.Mutex // file holds writer for each of the log types. file [numSeverity]flushSyncWriter - // file holds writer for the dedicated file when --log-file is set. - singleModeFile flushSyncWriter // pcs is used in V to avoid an allocation when computing the caller's PC. pcs [1]uintptr // vmap is a cache of the V Level for each V() call site, identified by PC. @@ -512,6 +494,9 @@ type loggingT struct { // If true, do not add the headers to log files skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -523,16 +508,6 @@ type buffer struct { var logging loggingT -func (l *loggingT) IsSingleMode() bool { - if l.logFile != "" { - // TODO: Remove the toStdErr reset when switching the logging.toStderr - // default value to be false. - l.toStderr = false - return true - } - return false -} - // setVState sets a consistent state for V logging. // l.mu is held. func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { @@ -607,9 +582,14 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { file = "???" 
line = 1 } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } } } return l.formatHeader(s, file, line), file, line @@ -758,40 +738,28 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { - // In single-mode, all severity logs are tracked in a single dedicated file. - if logging.IsSingleMode() { + logging.mu.Lock() + defer logging.mu.Unlock() + for s := fatalLog; s >= infoLog; s-- { rb := &redirectBuffer{ w: w, } - logging.singleModeFile = rb - } else { - for s := fatalLog; s >= infoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb - } + logging.file[s] = rb } } // SetOutputBySeverity sets the output destination for specific severity func SetOutputBySeverity(name string, w io.Writer) { - // In single-mode, all severity logs are tracked in a single dedicated file. - // Guarantee this buffer exists whatever severity output is trying to be set. - if logging.IsSingleMode() { - if logging.singleModeFile == nil { - logging.singleModeFile = &redirectBuffer{w: w} - } - } else { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) - } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb + logging.mu.Lock() + defer logging.mu.Unlock() + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) + } + rb := &redirectBuffer{ + w: w, } + logging.file[sev] = rb } // output writes the data to the log files and releases the buffer. @@ -803,7 +771,46 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo } } data := buf.Bytes() - l.writeLogData(s, data) + if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + } if s == fatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { @@ -811,68 +818,36 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo timeoutFlush(10 * time.Second) os.Exit(1) } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. 
+ if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - l.writeLogData(fatalLog, stacks(true)) + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } l.mu.Unlock() timeoutFlush(10 * time.Second) os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } l.putBuffer(buf) l.mu.Unlock() - // Note: The stats estimate logs for each severity level individually, - // even in the situation that log-file is specified and - // all severity-level logs are tracked only in the infoLog file. if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) } } -// writeLogData writes |data| to the `s` and lower severity files. -// e.g. If Severity level is Error, the data will be written to all the Error, -// Warning, and Info log file. However, if --log_file flag is provided, klog -// no longer tracks logs separately due to their severity level, but rather -// only write to the singleModeFile which later on will be flushed to the -// dedicated log_file. -func (l *loggingT) writeLogData(s severity, data []byte) { - shouldAlsoToStderr := l.alsoToStderr && s >= l.stderrThreshold.get() - if l.IsSingleMode() { - if shouldAlsoToStderr { - os.Stderr.Write(data) - } - if l.singleModeFile == nil { - now := time.Now() - sb := &syncBuffer{ - logger: l, - maxbytes: CalculateMaxSize(), - } - if err := sb.rotateFile(now, true); err != nil { - l.exit(err) - } - l.singleModeFile = sb - } - l.singleModeFile.Write(data) - } else { - if l.toStderr || shouldAlsoToStderr { - os.Stderr.Write(data) - } - for currSeverity := s; currSeverity >= infoLog; currSeverity-- { - if l.file[currSeverity] == nil { - if err := l.createFiles(currSeverity); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - if f := l.file[currSeverity]; f != nil { // Can be nil if -logtostderr is set. - f.Write(data) - } - } - } -} - // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -883,7 +858,7 @@ func timeoutFlush(timeout time.Duration) { select { case <-done: case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } @@ -1047,21 +1022,13 @@ func (l *loggingT) lockAndFlushAll() { // flushAll flushes all the logs and attempts to "sync" their data to disk. // l.mu is held. func (l *loggingT) flushAll() { - if l.IsSingleMode() { - file := l.singleModeFile + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] if file != nil { file.Flush() // ignore error file.Sync() // ignore error } - } else { - // Flush from fatal down, in case there's trouble flushing. 
- for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } } } @@ -1147,9 +1114,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if glog.V(2) { glog.Info("log this") } +// if klog.V(2) { klog.Info("log this") } // or -// glog.V(2).Info("log this") +// klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1223,7 +1190,7 @@ func InfoDepth(depth int, args ...interface{}) { } // Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } @@ -1247,7 +1214,7 @@ func WarningDepth(depth int, args ...interface{}) { } // Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } @@ -1271,7 +1238,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { logging.println(errorLog, args...) } @@ -1297,7 +1264,7 @@ func FatalDepth(depth int, args ...interface{}) { // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) } diff --git a/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go new file mode 100644 index 00000000..19783370 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct { + // ReportFilename is added to CustomArgs for specifying name of report file used + // by API linter. If specified, API rule violations will be printed to report file. + // Otherwise default value "-" will be used which indicates stdout. 
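+	// For example (the output path is hypothetical), invoking the generator as
+	//	openapi-gen --report-filename=./violations.report ...
+	// writes the linter report to that file instead of stdout; the flag itself
+	// is registered in AddFlags below.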
+ ReportFilename string +} + +// NewDefaults returns default arguments for the generator. Returning the arguments instead +// of using default flag parsing allows registering custom arguments afterwards +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + // Default() sets a couple of flag default values for example the boilerplate. + // WithoutDefaultFlagParsing() disables implicit addition of command line flags and parsing, + // which allows registering custom arguments afterwards + genericArgs := args.Default().WithoutDefaultFlagParsing() + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kube-openapi/boilerplate/boilerplate.go.txt") + + customArgs := &CustomArgs{} + genericArgs.CustomArgs = customArgs + + // Default value for report filename is "-", which stands for stdout + customArgs.ReportFilename = "-" + // Default value for output file base name + genericArgs.OutputFileBaseName = "openapi_generated" + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (c *CustomArgs) AddFlags(fs *pflag.FlagSet) { + fs.StringVarP(&c.ReportFilename, "report-filename", "r", c.ReportFilename, "Name of report file used by API linter to print API violations. Default \"-\" stands for standard output. NOTE that if valid filename other than \"-\" is specified, API linter won't return error on detected API violations. This allows further check of existing API violations without stopping the OpenAPI generation toolchain.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + c, ok := genericArgs.CustomArgs.(*CustomArgs) + if !ok { + return fmt.Errorf("input arguments don't contain valid custom arguments") + } + if len(c.ReportFilename) == 0 { + return fmt.Errorf("report filename cannot be empty. specify a valid filename or use \"-\" for stdout") + } + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go new file mode 100644 index 00000000..f1c87c30 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -0,0 +1,192 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "net/http" + "strings" + + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" +) + +const ( + // TODO: Make this configurable. + ExtensionPrefix = "x-kubernetes-" + ExtensionV2Schema = ExtensionPrefix + "v2-schema" +) + +// OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. +type OpenAPIDefinition struct { + Schema spec.Schema + Dependencies []string +} + +type ReferenceCallback func(path string) spec.Ref + +// GetOpenAPIDefinitions is collection of all definitions. 
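+// A generated implementation takes roughly the following shape (the package,
+// type and schema function names here are hypothetical):
+//
+//	func GetOpenAPIDefinitions(ref ReferenceCallback) map[string]OpenAPIDefinition {
+//		return map[string]OpenAPIDefinition{
+//			"k8s.io/example/pkg/apis/demo/v1.Widget": schema_pkg_apis_demo_v1_Widget(ref),
+//		}
+//	}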
+type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition + +// OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface, +// the definition returned by it will be used, otherwise the auto-generated definitions will be used. See +// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when +// possible. +type OpenAPIDefinitionGetter interface { + OpenAPIDefinition() *OpenAPIDefinition +} + +type OpenAPIV3DefinitionGetter interface { + OpenAPIV3Definition() *OpenAPIDefinition +} + +type PathHandler interface { + Handle(path string, handler http.Handler) +} + +// Config is set of configuration for openAPI spec generation. +type Config struct { + // List of supported protocols such as https, http, etc. + ProtocolList []string + + // Info is general information about the API. + Info *spec.Info + + // DefaultResponse will be used if an operation does not have any responses listed. It + // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. + DefaultResponse *spec.Response + + // ResponseDefinitions will be added to "responses" under the top-level swagger object. This is an object + // that holds responses definitions that can be used across operations. This property does not define + // global responses for all operations. For more info please refer: + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields + ResponseDefinitions map[string]spec.Response + + // CommonResponses will be added as a response to all operation specs. This is a good place to add common + // responses such as authorization failed. + CommonResponses map[int]spec.Response + + // List of webservice's path prefixes to ignore + IgnorePrefixes []string + + // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map + // or any of the models will result in spec generation failure. + GetDefinitions GetOpenAPIDefinitions + + // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. + GetOperationIDAndTags func(r *restful.Route) (string, []string, error) + + // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. + // It is an optional function to customize model names. + GetDefinitionName func(name string) (string, spec.Extensions) + + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) + + // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config + // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. + SecurityDefinitions *spec.SecurityDefinitions + + // DefaultSecurity for all operations. This will pass as spec.SwaggerProps.Security to OpenAPI. + // For most cases, this will be list of acceptable definitions in SecurityDefinitions. 
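+	// As a sketch (the scheme name is hypothetical), requiring one scheme for
+	// every operation could be expressed as:
+	//	DefaultSecurity: []map[string][]string{{"BearerToken": {}}}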
+ DefaultSecurity []map[string][]string +} + +var schemaTypeFormatMap = map[string][]string{ + "uint": {"integer", "int32"}, + "uint8": {"integer", "byte"}, + "uint16": {"integer", "int32"}, + "uint32": {"integer", "int64"}, + "uint64": {"integer", "int64"}, + "int": {"integer", "int32"}, + "int8": {"integer", "byte"}, + "int16": {"integer", "int32"}, + "int32": {"integer", "int32"}, + "int64": {"integer", "int64"}, + "byte": {"integer", "byte"}, + "float64": {"number", "double"}, + "float32": {"number", "float"}, + "bool": {"boolean", ""}, + "time.Time": {"string", "date-time"}, + "string": {"string", ""}, + "integer": {"integer", ""}, + "number": {"number", ""}, + "boolean": {"boolean", ""}, + "[]byte": {"string", "byte"}, // base64 encoded characters + "interface{}": {"object", ""}, +} + +// This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are +// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type +// comment (the comment that is added before type definition) will be lost. The spec will still have the property +// comment. The second way is to implement OpenAPIDefinitionGetter interface. That function can customize the spec (so +// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple +// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. +// Example: +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // IntOrString documentation... +// type IntOrString { ... } +// +// Adding IntOrString to this function: +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } +// +// Implement OpenAPIDefinitionGetter for IntOrString: +// +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// ... +// definitions: +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." // new +// } +// } +// +func GetOpenAPITypeFormat(typeName string) (string, string) { + mapped, ok := schemaTypeFormatMap[typeName] + if !ok { + return "", "" + } + return mapped[0], mapped[1] +} + +func EscapeJsonPointer(p string) string { + // Escaping reference name using rfc6901 + p = strings.Replace(p, "~", "~0", -1) + p = strings.Replace(p, "/", "~1", -1) + return p +} + +func EmbedOpenAPIDefinitionIntoV2Extension(main OpenAPIDefinition, embedded OpenAPIDefinition) OpenAPIDefinition { + if main.Schema.Extensions == nil { + main.Schema.Extensions = make(map[string]interface{}) + } + main.Schema.Extensions[ExtensionV2Schema] = embedded.Schema + return main +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/doc.go b/vendor/k8s.io/kube-openapi/pkg/common/doc.go new file mode 100644 index 00000000..2ba6d247 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/common/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package common holds shared code and types between open API code +// generator and spec generator. +package common diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/README.md b/vendor/k8s.io/kube-openapi/pkg/generators/README.md new file mode 100644 index 00000000..72b4e5fb --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/README.md @@ -0,0 +1,49 @@ +# Generate OpenAPI definitions + +- To generate definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. +- To exclude a type or a member from a tagged package/type, add "+k8s:openapi-gen=false" tag to the comment lines. + +# OpenAPI Extensions + +OpenAPI spec can have extensions on types. To define one or more extensions on a type or its member +add `+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE` to the comment lines before type/member. A type/member can +have multiple extensions. The rest of the line in the comment will be used as $VALUE so there is no need to +escape or quote the value string. Extensions can be used to pass more information to client generators or +documentation generators. For example a type might have a friendly name to be displayed in documentation or +being used in a client's fluent interface. + +# Custom OpenAPI type definitions + +Custom types which otherwise don't map directly to OpenAPI can override their +OpenAPI definition by implementing a function named "OpenAPIDefinition" with +the following signature: + +```go + import openapi "k8s.io/kube-openapi/pkg/common" + + // ... + + type Time struct { + time.Time + } + + func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } + } +``` + +Alternatively, the type can avoid the "openapi" import by defining the following +methods. The following example produces the same OpenAPI definition as the +example above: + +```go + func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + func (_ Time) OpenAPISchemaFormat() string { return "date-time" } +``` diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go new file mode 100644 index 00000000..26b951bc --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go @@ -0,0 +1,220 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + + "k8s.io/kube-openapi/pkg/generators/rules" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + "k8s.io/klog" +) + +const apiViolationFileType = "api-violation" + +type apiViolationFile struct { + // Since our file actually is unrelated to the package structure, use a + // path that hasn't been mangled by the framework. 
+ unmangledPath string +} + +func (a apiViolationFile) AssembleFile(f *generator.File, path string) error { + path = a.unmangledPath + klog.V(2).Infof("Assembling file %q", path) + if path == "-" { + _, err := io.Copy(os.Stdout, &f.Body) + return err + } + + output, err := os.Create(path) + if err != nil { + return err + } + defer output.Close() + _, err = io.Copy(output, &f.Body) + return err +} + +func (a apiViolationFile) VerifyFile(f *generator.File, path string) error { + if path == "-" { + // Nothing to verify against. + return nil + } + path = a.unmangledPath + + formatted := f.Body.Bytes() + existing, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("unable to read file %q for comparison: %v", path, err) + } + if bytes.Compare(formatted, existing) == 0 { + return nil + } + + // Be nice and find the first place where they differ + // (Copied from gengo's default file type) + i := 0 + for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] { + i++ + } + eDiff, fDiff := existing[i:], formatted[i:] + if len(eDiff) > 100 { + eDiff = eDiff[:100] + } + if len(fDiff) > 100 { + fDiff = fDiff[:100] + } + return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", path, string(eDiff), string(fDiff)) +} + +func newAPIViolationGen() *apiViolationGen { + return &apiViolationGen{ + linter: newAPILinter(), + } +} + +type apiViolationGen struct { + generator.DefaultGen + + linter *apiLinter +} + +func (v *apiViolationGen) FileType() string { return apiViolationFileType } +func (v *apiViolationGen) Filename() string { + return "this file is ignored by the file assembler" +} + +func (v *apiViolationGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + klog.V(5).Infof("validating API rules for type %v", t) + if err := v.linter.validate(t); err != nil { + return err + } + return nil +} + +// Finalize prints the API rule violations to report file (if specified from +// arguments) or stdout (default) +func (v *apiViolationGen) Finalize(c *generator.Context, w io.Writer) error { + // NOTE: we don't return error here because we assume that the report file will + // get evaluated afterwards to determine if error should be raised. For example, + // you can have make rules that compare the report file with existing known + // violations (whitelist) and determine no error if no change is detected. + v.linter.report(w) + return nil +} + +// apiLinter is the framework hosting multiple API rules and recording API rule +// violations +type apiLinter struct { + // API rules that implement APIRule interface and output API rule violations + rules []APIRule + violations []apiViolation +} + +// newAPILinter creates an apiLinter object with API rules in package rules. Please +// add APIRule here when new API rule is implemented. +func newAPILinter() *apiLinter { + return &apiLinter{ + rules: []APIRule{ + &rules.NamesMatch{}, + &rules.OmitEmptyMatchCase{}, + &rules.ListTypeMissing{}, + }, + } +} + +// apiViolation uniquely identifies single API rule violation +type apiViolation struct { + // Name of rule from APIRule.Name() + rule string + + packageName string + typeName string + + // Optional: name of field that violates API rule. Empty fieldName implies that + // the entire type violates the rule. + field string +} + +// apiViolations implements sort.Interface for []apiViolation based on the fields: rule, +// packageName, typeName and field. 
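+// When printed by the linter's report method, each violation becomes one
+// comma-separated line, for example (hypothetical package and type):
+//
+//	API rule violation: names_match,k8s.io/example/pkg/apis/demo/v1,Widget,FooBar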
+type apiViolations []apiViolation + +func (a apiViolations) Len() int { return len(a) } +func (a apiViolations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a apiViolations) Less(i, j int) bool { + if a[i].rule != a[j].rule { + return a[i].rule < a[j].rule + } + if a[i].packageName != a[j].packageName { + return a[i].packageName < a[j].packageName + } + if a[i].typeName != a[j].typeName { + return a[i].typeName < a[j].typeName + } + return a[i].field < a[j].field +} + +// APIRule is the interface for validating API rule on Go types +type APIRule interface { + // Validate evaluates API rule on type t and returns a list of field names in + // the type that violate the rule. Empty field name [""] implies the entire + // type violates the rule. + Validate(t *types.Type) ([]string, error) + + // Name returns the name of APIRule + Name() string +} + +// validate runs all API rules on type t and records any API rule violation +func (l *apiLinter) validate(t *types.Type) error { + for _, r := range l.rules { + klog.V(5).Infof("validating API rule %v for type %v", r.Name(), t) + fields, err := r.Validate(t) + if err != nil { + return err + } + for _, field := range fields { + l.violations = append(l.violations, apiViolation{ + rule: r.Name(), + packageName: t.Name.Package, + typeName: t.Name.Name, + field: field, + }) + } + } + return nil +} + +// report prints any API rule violation to writer w and returns error if violation exists +func (l *apiLinter) report(w io.Writer) error { + sort.Sort(apiViolations(l.violations)) + for _, v := range l.violations { + fmt.Fprintf(w, "API rule violation: %s,%s,%s,%s\n", v.rule, v.packageName, v.typeName, v.field) + } + if len(l.violations) > 0 { + return fmt.Errorf("API rule violations exist") + } + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/config.go b/vendor/k8s.io/kube-openapi/pkg/generators/config.go new file mode 100644 index 00000000..33cd9eb5 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/config.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path/filepath" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog" + + generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" +) + +type identityNamer struct{} + +func (_ identityNamer) Name(t *types.Type) string { + return t.Name.String() +} + +var _ namer.Namer = identityNamer{} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer("", nil), + "sorting_namer": identityNamer{}, + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. 
+func DefaultNameSystem() string { + return "sorting_namer" +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + header = append(header, []byte( + ` +// This file was autogenerated by openapi-gen. Do not edit it manually! + +`)...) + + reportPath := "-" + if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok { + reportPath = customArgs.ReportFilename + } + context.FileTypes[apiViolationFileType] = apiViolationFile{ + unmangledPath: reportPath, + } + + return generator.Packages{ + &generator.DefaultPackage{ + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + newOpenAPIGen( + arguments.OutputFileBaseName, + arguments.OutputPackagePath, + ), + newAPIViolationGen(), + } + }, + FilterFunc: apiTypeFilterFunc, + }, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go new file mode 100644 index 00000000..14eab18f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go @@ -0,0 +1,182 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/gengo/examples/set-gen/sets" + "k8s.io/gengo/types" +) + +const extensionPrefix = "x-kubernetes-" + +// extensionAttributes encapsulates common traits for particular extensions. +type extensionAttributes struct { + xName string + kind types.Kind + allowedValues sets.String +} + +// Extension tag to openapi extension attributes +var tagToExtension = map[string]extensionAttributes{ + "patchMergeKey": { + xName: "x-kubernetes-patch-merge-key", + kind: types.Slice, + }, + "patchStrategy": { + xName: "x-kubernetes-patch-strategy", + kind: types.Slice, + allowedValues: sets.NewString("merge", "retainKeys"), + }, + "listMapKey": { + xName: "x-kubernetes-list-map-keys", + kind: types.Slice, + }, + "listType": { + xName: "x-kubernetes-list-type", + kind: types.Slice, + allowedValues: sets.NewString("atomic", "set", "map"), + }, +} + +// Extension encapsulates information necessary to generate an OpenAPI extension. 
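+// As a sketch (the field is hypothetical), a member declared as
+//
+//	// +listType=map
+//	// +listMapKey=name
+//	Containers []Container `json:"containers,omitempty"`
+//
+// is turned by parseExtensions below into entries such as
+//
+//	extension{idlTag: "listType", xName: "x-kubernetes-list-type", values: []string{"map"}}
+//	extension{idlTag: "listMapKey", xName: "x-kubernetes-list-map-keys", values: []string{"name"}}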
+type extension struct { + idlTag string // Example: listType + xName string // Example: x-kubernetes-list-type + values []string // Example: [atomic] +} + +func (e extension) hasAllowedValues() bool { + return tagToExtension[e.idlTag].allowedValues.Len() > 0 +} + +func (e extension) allowedValues() sets.String { + return tagToExtension[e.idlTag].allowedValues +} + +func (e extension) hasKind() bool { + return len(tagToExtension[e.idlTag].kind) > 0 +} + +func (e extension) kind() types.Kind { + return tagToExtension[e.idlTag].kind +} + +func (e extension) validateAllowedValues() error { + // allowedValues not set means no restrictions on values. + if !e.hasAllowedValues() { + return nil + } + // Check for missing value. + if len(e.values) == 0 { + return fmt.Errorf("%s needs a value, none given.", e.idlTag) + } + // For each extension value, validate that it is allowed. + allowedValues := e.allowedValues() + if !allowedValues.HasAll(e.values...) { + return fmt.Errorf("%v not allowed for %s. Allowed values: %v", + e.values, e.idlTag, allowedValues.List()) + } + return nil +} + +func (e extension) validateType(kind types.Kind) error { + // If this extension class has no kind, then don't validate the type. + if !e.hasKind() { + return nil + } + if kind != e.kind() { + return fmt.Errorf("tag %s on type %v; only allowed on type %v", + e.idlTag, kind, e.kind()) + } + return nil +} + +func (e extension) hasMultipleValues() bool { + return len(e.values) > 1 +} + +// Returns sorted list of map keys. Needed for deterministic testing. +func sortedMapKeys(m map[string][]string) []string { + keys := make([]string, len(m)) + i := 0 + for k := range m { + keys[i] = k + i++ + } + sort.Strings(keys) + return keys +} + +// Parses comments to return openapi extensions. Returns a list of +// extensions which parsed correctly, as well as a list of the +// parse errors. Validating extensions is performed separately. +// NOTE: Non-empty errors does not mean extensions is empty. +func parseExtensions(comments []string) ([]extension, []error) { + extensions := []extension{} + errors := []error{} + // First, generate extensions from "+k8s:openapi-gen=x-kubernetes-*" annotations. + values := getOpenAPITagValue(comments) + for _, val := range values { + // Example: x-kubernetes-member-tag:member_test + if strings.HasPrefix(val, extensionPrefix) { + parts := strings.SplitN(val, ":", 2) + if len(parts) != 2 { + errors = append(errors, fmt.Errorf("invalid extension value: %v", val)) + continue + } + e := extension{ + idlTag: tagName, // Example: k8s:openapi-gen + xName: parts[0], // Example: x-kubernetes-member-tag + values: []string{parts[1]}, // Example: member_test + } + extensions = append(extensions, e) + } + } + // Next, generate extensions from "idlTags" (e.g. 
+listType) + tagValues := types.ExtractCommentTags("+", comments) + for _, idlTag := range sortedMapKeys(tagValues) { + xAttrs, exists := tagToExtension[idlTag] + if !exists { + continue + } + values := tagValues[idlTag] + e := extension{ + idlTag: idlTag, // listType + xName: xAttrs.xName, // x-kubernetes-list-type + values: values, // [atomic] + } + extensions = append(extensions, e) + } + return extensions, errors +} + +func validateMemberExtensions(extensions []extension, m *types.Member) []error { + errors := []error{} + for _, e := range extensions { + if err := e.validateAllowedValues(); err != nil { + errors = append(errors, err) + } + if err := e.validateType(m.Type.Kind); err != nil { + errors = append(errors, err) + } + } + return errors +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go new file mode 100644 index 00000000..d42ed610 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -0,0 +1,680 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "reflect" + "sort" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + openapi "k8s.io/kube-openapi/pkg/common" + + "k8s.io/klog" +) + +// This is the comment tag that carries parameters for open API generation. +const tagName = "k8s:openapi-gen" +const tagOptional = "optional" + +// Known values for the tag. +const ( + tagValueTrue = "true" + tagValueFalse = "false" +) + +// Used for temporary validation of patch struct tags. +// TODO: Remove patch struct tag validation because they we are now consuming OpenAPI on server. +var tempPatchTags = [...]string{ + "patchMergeKey", + "patchStrategy", +} + +func getOpenAPITagValue(comments []string) []string { + return types.ExtractCommentTags("+", comments)[tagName] +} + +func getSingleTagsValue(comments []string, tag string) (string, error) { + tags, ok := types.ExtractCommentTags("+", comments)[tag] + if !ok || len(tags) == 0 { + return "", nil + } + if len(tags) > 1 { + return "", fmt.Errorf("multiple values are not allowed for tag %s", tag) + } + return tags[0], nil +} + +func hasOpenAPITagValue(comments []string, value string) bool { + tagValues := getOpenAPITagValue(comments) + for _, val := range tagValues { + if val == value { + return true + } + } + return false +} + +// hasOptionalTag returns true if the member has +optional in its comments or +// omitempty in its json tags. 
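+// For example (hypothetical fields), both of the following members are treated
+// as optional:
+//
+//	// +optional
+//	Replicas *int32 `json:"replicas"`
+//
+//	Paused bool `json:"paused,omitempty"`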
+func hasOptionalTag(m *types.Member) bool { + hasOptionalCommentTag := types.ExtractCommentTags( + "+", m.CommentLines)[tagOptional] != nil + hasOptionalJsonTag := strings.Contains( + reflect.StructTag(m.Tags).Get("json"), "omitempty") + return hasOptionalCommentTag || hasOptionalJsonTag +} + +func apiTypeFilterFunc(c *generator.Context, t *types.Type) bool { + // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen + if strings.HasPrefix(t.Name.Name, "codecSelfer") { + return false + } + pkg := c.Universe.Package(t.Name.Package) + if hasOpenAPITagValue(pkg.Comments, tagValueTrue) { + return !hasOpenAPITagValue(t.CommentLines, tagValueFalse) + } + if hasOpenAPITagValue(t.CommentLines, tagValueTrue) { + return true + } + return false +} + +const ( + specPackagePath = "github.com/go-openapi/spec" + openAPICommonPackagePath = "k8s.io/kube-openapi/pkg/common" +) + +// openApiGen produces a file with auto-generated OpenAPI functions. +type openAPIGen struct { + generator.DefaultGen + // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. + targetPackage string + imports namer.ImportTracker +} + +func newOpenAPIGen(sanitizedName string, targetPackage string) generator.Generator { + return &openAPIGen{ + DefaultGen: generator.DefaultGen{ + OptionalName: sanitizedName, + }, + imports: generator.NewImportTracker(), + targetPackage: targetPackage, + } +} + +const nameTmpl = "schema_$.type|private$" + +func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.targetPackage, g.imports), + "private": &namer.NameStrategy{ + Join: func(pre string, in []string, post string) string { + return strings.Join(in, "_") + }, + PrependPackageNames: 4, // enough to fully qualify from k8s.io/api/... 
+ }, + } +} + +func (g *openAPIGen) isOtherPackage(pkg string) bool { + if pkg == g.targetPackage { + return false + } + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { + return false + } + return true +} + +func (g *openAPIGen) Imports(c *generator.Context) []string { + importLines := []string{} + for _, singleImport := range g.imports.ImportLines() { + importLines = append(importLines, singleImport) + } + return importLines +} + +func argsFromType(t *types.Type) generator.Args { + return generator.Args{ + "type": t, + "ReferenceCallback": types.Ref(openAPICommonPackagePath, "ReferenceCallback"), + "OpenAPIDefinition": types.Ref(openAPICommonPackagePath, "OpenAPIDefinition"), + "SpecSchemaType": types.Ref(specPackagePath, "Schema"), + } +} + +func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + sw.Do("func GetOpenAPIDefinitions(ref $.ReferenceCallback|raw$) map[string]$.OpenAPIDefinition|raw$ {\n", argsFromType(nil)) + sw.Do("return map[string]$.OpenAPIDefinition|raw${\n", argsFromType(nil)) + + for _, t := range c.Order { + err := newOpenAPITypeWriter(sw, c).generateCall(t) + if err != nil { + return err + } + } + + sw.Do("}\n", nil) + sw.Do("}\n\n", nil) + + return sw.Error() +} + +func (g *openAPIGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + klog.V(5).Infof("generating for type %v", t) + sw := generator.NewSnippetWriter(w, c, "$", "$") + err := newOpenAPITypeWriter(sw, c).generate(t) + if err != nil { + return err + } + return sw.Error() +} + +func getJsonTags(m *types.Member) []string { + jsonTag := reflect.StructTag(m.Tags).Get("json") + if jsonTag == "" { + return []string{} + } + return strings.Split(jsonTag, ",") +} + +func getReferableName(m *types.Member) string { + jsonTags := getJsonTags(m) + if len(jsonTags) > 0 { + if jsonTags[0] == "-" { + return "" + } else { + return jsonTags[0] + } + } else { + return m.Name + } +} + +func shouldInlineMembers(m *types.Member) bool { + jsonTags := getJsonTags(m) + return len(jsonTags) > 1 && jsonTags[1] == "inline" +} + +type openAPITypeWriter struct { + *generator.SnippetWriter + context *generator.Context + refTypes map[string]*types.Type + GetDefinitionInterface *types.Type +} + +func newOpenAPITypeWriter(sw *generator.SnippetWriter, c *generator.Context) openAPITypeWriter { + return openAPITypeWriter{ + SnippetWriter: sw, + context: c, + refTypes: map[string]*types.Type{}, + } +} + +func methodReturnsValue(mt *types.Type, pkg, name string) bool { + if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { + return false + } + r := mt.Signature.Results[0] + return r.Name.Name == name && r.Name.Package == pkg +} + +func hasOpenAPIV3DefinitionMethod(t *types.Type) bool { + for mn, mt := range t.Methods { + if mn != "OpenAPIV3Definition" { + continue + } + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") + } + return false +} + +func hasOpenAPIDefinitionMethod(t *types.Type) bool { + for mn, mt := range t.Methods { + if mn != "OpenAPIDefinition" { + continue + } + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") + } + return false +} + +func hasOpenAPIDefinitionMethods(t *types.Type) bool { + var hasSchemaTypeMethod, hasOpenAPISchemaFormat bool + for mn, mt := range t.Methods { + switch mn { + case "OpenAPISchemaType": + hasSchemaTypeMethod = methodReturnsValue(mt, "", "[]string") + case "OpenAPISchemaFormat": + hasOpenAPISchemaFormat = methodReturnsValue(mt, "", 
"string") + } + } + return hasSchemaTypeMethod && hasOpenAPISchemaFormat +} + +// typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name. +func typeShortName(t *types.Type) string { + return filepath.Base(t.Name.Package) + "." + t.Name.Name +} + +func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([]string, error) { + var err error + for _, m := range t.Members { + if hasOpenAPITagValue(m.CommentLines, tagValueFalse) { + continue + } + if shouldInlineMembers(&m) { + required, err = g.generateMembers(m.Type, required) + if err != nil { + return required, err + } + continue + } + name := getReferableName(&m) + if name == "" { + continue + } + if !hasOptionalTag(&m) { + required = append(required, name) + } + if err = g.generateProperty(&m, t); err != nil { + klog.Errorf("Error when generating: %v, %v\n", name, m) + return required, err + } + } + return required, nil +} + +func (g openAPITypeWriter) generateCall(t *types.Type) error { + // Only generate for struct type and ignore the rest + switch t.Kind { + case types.Struct: + args := argsFromType(t) + g.Do("\"$.$\": ", t.Name) + + hasV2Definition := hasOpenAPIDefinitionMethod(t) + hasV2DefinitionTypeAndFormat := hasOpenAPIDefinitionMethods(t) + hasV3Definition := hasOpenAPIV3DefinitionMethod(t) + + switch { + case hasV2DefinitionTypeAndFormat: + g.Do(nameTmpl+"(ref),\n", args) + case hasV2Definition && hasV3Definition: + g.Do("common.EmbedOpenAPIDefinitionIntoV2Extension($.type|raw${}.OpenAPIV3Definition(), $.type|raw${}.OpenAPIDefinition()),\n", args) + case hasV2Definition: + g.Do("$.type|raw${}.OpenAPIDefinition(),\n", args) + case hasV3Definition: + g.Do("$.type|raw${}.OpenAPIV3Definition(),\n", args) + default: + g.Do(nameTmpl+"(ref),\n", args) + } + } + return g.Error() +} + +func (g openAPITypeWriter) generate(t *types.Type) error { + // Only generate for struct type and ignore the rest + switch t.Kind { + case types.Struct: + hasV2Definition := hasOpenAPIDefinitionMethod(t) + hasV2DefinitionTypeAndFormat := hasOpenAPIDefinitionMethods(t) + hasV3Definition := hasOpenAPIV3DefinitionMethod(t) + + if hasV2Definition || (hasV3Definition && !hasV2DefinitionTypeAndFormat) { + // already invoked directly + return nil + } + + args := argsFromType(t) + g.Do("func "+nameTmpl+"(ref $.ReferenceCallback|raw$) $.OpenAPIDefinition|raw$ {\n", args) + switch { + case hasV2DefinitionTypeAndFormat && hasV3Definition: + g.Do("return common.EmbedOpenAPIDefinitionIntoV2Extension($.type|raw${}.OpenAPIV3Definition(), $.OpenAPIDefinition|raw${\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n", args) + g.generateDescription(t.CommentLines) + g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "})\n}\n\n", args) + return nil + case hasV2DefinitionTypeAndFormat: + g.Do("return $.OpenAPIDefinition|raw${\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n", args) + g.generateDescription(t.CommentLines) + g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "}\n}\n\n", args) + return nil + } + g.Do("return $.OpenAPIDefinition|raw${\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", args) + g.generateDescription(t.CommentLines) + g.Do("Type: []string{\"object\"},\n", nil) + + // write members into a temporary buffer, in order to postpone writing out the Properties field. 
We only do + // that if it is not empty. + propertiesBuf := bytes.Buffer{} + bsw := g + bsw.SnippetWriter = generator.NewSnippetWriter(&propertiesBuf, g.context, "$", "$") + required, err := bsw.generateMembers(t, []string{}) + if err != nil { + return err + } + if propertiesBuf.Len() > 0 { + g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args) + g.Do(strings.Replace(propertiesBuf.String(), "$", "$\"$\"$", -1), nil) // escape $ (used as delimiter of the templates) + g.Do("},\n", nil) + } + + if len(required) > 0 { + g.Do("Required: []string{\"$.$\"},\n", strings.Join(required, "\",\"")) + } + g.Do("},\n", nil) + if err := g.generateStructExtensions(t); err != nil { + return err + } + g.Do("},\n", nil) + + // Map order is undefined, sort them or we may get a different file generated each time. + keys := []string{} + for k := range g.refTypes { + keys = append(keys, k) + } + sort.Strings(keys) + deps := []string{} + for _, k := range keys { + v := g.refTypes[k] + if t, _ := openapi.GetOpenAPITypeFormat(v.String()); t != "" { + // This is a known type, we do not need a reference to it + // Will eliminate special case of time.Time + continue + } + deps = append(deps, k) + } + if len(deps) > 0 { + g.Do("Dependencies: []string{\n", args) + for _, k := range deps { + g.Do("\"$.$\",", k) + } + g.Do("},\n", nil) + } + g.Do("}\n}\n\n", nil) + } + return nil +} + +func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { + extensions, errors := parseExtensions(t.CommentLines) + // Initially, we will only log struct extension errors. + if len(errors) > 0 { + for _, e := range errors { + klog.Errorf("[%s]: %s\n", t.String(), e) + } + } + unions, errors := parseUnions(t) + if len(errors) > 0 { + for _, e := range errors { + klog.Errorf("[%s]: %s\n", t.String(), e) + } + } + + // TODO(seans3): Validate struct extensions here. + g.emitExtensions(extensions, unions) + return nil +} + +func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type) error { + extensions, parseErrors := parseExtensions(m.CommentLines) + validationErrors := validateMemberExtensions(extensions, m) + errors := append(parseErrors, validationErrors...) + // Initially, we will only log member extension errors. + if len(errors) > 0 { + errorPrefix := fmt.Sprintf("[%s] %s:", parent.String(), m.String()) + for _, e := range errors { + klog.V(2).Infof("%s %s\n", errorPrefix, e) + } + } + g.emitExtensions(extensions, nil) + return nil +} + +func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union) { + // If any extensions exist, then emit code to create them. + if len(extensions) == 0 && len(unions) == 0 { + return + } + g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil) + for _, extension := range extensions { + g.Do("\"$.$\": ", extension.xName) + if extension.hasMultipleValues() { + g.Do("[]interface{}{\n", nil) + } + for _, value := range extension.values { + g.Do("\"$.$\",\n", value) + } + if extension.hasMultipleValues() { + g.Do("},\n", nil) + } + } + if len(unions) > 0 { + g.Do("\"x-kubernetes-unions\": []interface{}{\n", nil) + for _, u := range unions { + u.emit(g) + } + g.Do("},\n", nil) + } + g.Do("},\n},\n", nil) +} + +// TODO(#44005): Move this validation outside of this generator (probably to policy verifier) +func (g openAPITypeWriter) validatePatchTags(m *types.Member, parent *types.Type) error { + // TODO: Remove patch struct tag validation because they we are now consuming OpenAPI on server. 
+ for _, tagKey := range tempPatchTags { + structTagValue := reflect.StructTag(m.Tags).Get(tagKey) + commentTagValue, err := getSingleTagsValue(m.CommentLines, tagKey) + if err != nil { + return err + } + if structTagValue != commentTagValue { + return fmt.Errorf("Tags in comment and struct should match for member (%s) of (%s)", + m.Name, parent.Name.String()) + } + } + return nil +} + +func (g openAPITypeWriter) generateDescription(CommentLines []string) { + var buffer bytes.Buffer + delPrevChar := func() { + if buffer.Len() > 0 { + buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n" + } + } + + for _, line := range CommentLines { + // Ignore all lines after --- + if line == "---" { + break + } + line = strings.TrimRight(line, " ") + leading := strings.TrimLeft(line, " ") + switch { + case len(line) == 0: // Keep paragraphs + delPrevChar() + buffer.WriteString("\n\n") + case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs + case strings.HasPrefix(leading, "+"): // Ignore instructions to go2idl + default: + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + delPrevChar() + line = "\n" + line + "\n" // Replace it with newline. This is useful when we have a line with: "Example:\n\tJSON-someting..." + } else { + line += " " + } + buffer.WriteString(line) + } + } + + postDoc := strings.TrimRight(buffer.String(), "\n") + postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to " + postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape " + postDoc = strings.Replace(postDoc, "\n", "\\n", -1) + postDoc = strings.Replace(postDoc, "\t", "\\t", -1) + postDoc = strings.Trim(postDoc, " ") + if postDoc != "" { + g.Do("Description: \"$.$\",\n", postDoc) + } +} + +func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) error { + name := getReferableName(m) + if name == "" { + return nil + } + if err := g.validatePatchTags(m, parent); err != nil { + return err + } + g.Do("\"$.$\": {\n", name) + if err := g.generateMemberExtensions(m, parent); err != nil { + return err + } + g.Do("SchemaProps: spec.SchemaProps{\n", nil) + g.generateDescription(m.CommentLines) + jsonTags := getJsonTags(m) + if len(jsonTags) > 1 && jsonTags[1] == "string" { + g.generateSimpleProperty("string", "") + g.Do("},\n},\n", nil) + return nil + } + t := resolveAliasAndPtrType(m.Type) + // If we can get a openAPI type and format for this type, we consider it to be simple property + typeString, format := openapi.GetOpenAPITypeFormat(t.String()) + if typeString != "" { + g.generateSimpleProperty(typeString, format) + g.Do("},\n},\n", nil) + return nil + } + switch t.Kind { + case types.Builtin: + return fmt.Errorf("please add type %v to getOpenAPITypeFormat function", t) + case types.Map: + if err := g.generateMapProperty(t); err != nil { + return err + } + case types.Slice, types.Array: + if err := g.generateSliceProperty(t); err != nil { + return err + } + case types.Struct, types.Interface: + g.generateReferenceProperty(t) + default: + return fmt.Errorf("cannot generate spec for type %v", t) + } + g.Do("},\n},\n", nil) + return g.Error() +} + +func (g openAPITypeWriter) generateSimpleProperty(typeString, format string) { + g.Do("Type: []string{\"$.$\"},\n", typeString) + g.Do("Format: \"$.$\",\n", format) +} + +func (g openAPITypeWriter) generateReferenceProperty(t *types.Type) { + g.refTypes[t.Name.String()] = t + g.Do("Ref: ref(\"$.$\"),\n", t.Name.String()) +} + +func resolveAliasAndPtrType(t *types.Type) *types.Type { + var prev 
*types.Type + for prev != t { + prev = t + if t.Kind == types.Alias { + t = t.Underlying + } + if t.Kind == types.Pointer { + t = t.Elem + } + } + return t +} + +func (g openAPITypeWriter) generateMapProperty(t *types.Type) error { + keyType := resolveAliasAndPtrType(t.Key) + elemType := resolveAliasAndPtrType(t.Elem) + + // According to OpenAPI examples, only map from string is supported + if keyType.Name.Name != "string" { + return fmt.Errorf("map with non-string keys are not supported by OpenAPI in %v", t) + } + g.Do("Type: []string{\"object\"},\n", nil) + g.Do("AdditionalProperties: &spec.SchemaOrBool{\nAllows: true,\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) + typeString, format := openapi.GetOpenAPITypeFormat(elemType.String()) + if typeString != "" { + g.generateSimpleProperty(typeString, format) + g.Do("},\n},\n},\n", nil) + return nil + } + switch elemType.Kind { + case types.Builtin: + return fmt.Errorf("please add type %v to getOpenAPITypeFormat function", elemType) + case types.Struct: + g.generateReferenceProperty(elemType) + case types.Slice, types.Array: + g.generateSliceProperty(elemType) + default: + return fmt.Errorf("map Element kind %v is not supported in %v", elemType.Kind, t.Name) + } + g.Do("},\n},\n},\n", nil) + return nil +} + +func (g openAPITypeWriter) generateSliceProperty(t *types.Type) error { + elemType := resolveAliasAndPtrType(t.Elem) + g.Do("Type: []string{\"array\"},\n", nil) + g.Do("Items: &spec.SchemaOrArray{\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) + typeString, format := openapi.GetOpenAPITypeFormat(elemType.String()) + if typeString != "" { + g.generateSimpleProperty(typeString, format) + g.Do("},\n},\n},\n", nil) + return nil + } + switch elemType.Kind { + case types.Builtin: + return fmt.Errorf("please add type %v to getOpenAPITypeFormat function", elemType) + case types.Struct: + g.generateReferenceProperty(elemType) + case types.Slice, types.Array: + g.generateSliceProperty(elemType) + default: + return fmt.Errorf("slice Element kind %v is not supported in %v", elemType.Kind, t) + } + g.Do("},\n},\n},\n", nil) + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS new file mode 100644 index 00000000..235bc545 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- roycaihw +approvers: +- roycaihw diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go new file mode 100644 index 00000000..384a44dc --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rules contains API rules that are enforced in OpenAPI spec generation +// as part of the machinery. Files under this package implement APIRule interface +// which evaluates Go type and produces list of API rule violations. 
+// +// Implementations of APIRule should be added to API linter under openAPIGen code- +// generator to get integrated in the generation process. +package rules diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go new file mode 100644 index 00000000..7c5ebb30 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go @@ -0,0 +1,36 @@ +package rules + +import ( + "k8s.io/gengo/types" +) + +const ListTypeIDLTag = "listType" + +// ListTypeMissing implements APIRule interface. +// A list type is required for inlined list. +type ListTypeMissing struct{} + +// Name returns the name of APIRule +func (l *ListTypeMissing) Name() string { + return "list_type_missing" +} + +// Validate evaluates API rule on type t and returns a list of field names in +// the type that violate the rule. Empty field name [""] implies the entire +// type violates the rule. +func (l *ListTypeMissing) Validate(t *types.Type) ([]string, error) { + fields := make([]string, 0) + + switch t.Kind { + case types.Struct: + for _, m := range t.Members { + if m.Type.Kind == types.Slice && types.ExtractCommentTags("+", m.CommentLines)[ListTypeIDLTag] == nil { + fields = append(fields, m.Name) + continue + } + } + } + + return fields, nil + +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go new file mode 100644 index 00000000..3a71ff17 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go @@ -0,0 +1,172 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "reflect" + "strings" + + "k8s.io/kube-openapi/pkg/util/sets" + + "k8s.io/gengo/types" +) + +var ( + // Blacklist of JSON tags that should skip match evaluation + jsonTagBlacklist = sets.NewString( + // Omitted field is ignored by the package + "-", + ) + + // Blacklist of JSON names that should skip match evaluation + jsonNameBlacklist = sets.NewString( + // Empty name is used for inline struct field (e.g. metav1.TypeMeta) + "", + // Special case for object and list meta + "metadata", + ) + + // List of substrings that aren't allowed in Go name and JSON name + disallowedNameSubstrings = sets.NewString( + // Underscore is not allowed in either name + "_", + // Dash is not allowed in either name. Note that since dash is a valid JSON tag, this should be checked + // after JSON tag blacklist check. + "-", + ) +) + +/* +NamesMatch implements APIRule interface. +Go field names must be CamelCase. JSON field names must be camelCase. Other than capitalization of the +initial letter, the two should almost always match. No underscores nor dashes in either. +This rule verifies the convention "Other than capitalization of the initial letter, the two should almost always match." 
+Examples (also in unit test): + Go name | JSON name | match + podSpec false + PodSpec podSpec true + PodSpec PodSpec false + podSpec podSpec false + PodSpec spec false + Spec podSpec false + JSONSpec jsonSpec true + JSONSpec jsonspec false + HTTPJSONSpec httpJSONSpec true +NOTE: this validator cannot tell two sequential all-capital words from one word, therefore the case below +is also considered matched. + HTTPJSONSpec httpjsonSpec true +NOTE: JSON names in jsonNameBlacklist should skip evaluation + true + podSpec true + podSpec - true + podSpec metadata true +*/ +type NamesMatch struct{} + +// Name returns the name of APIRule +func (n *NamesMatch) Name() string { + return "names_match" +} + +// Validate evaluates API rule on type t and returns a list of field names in +// the type that violate the rule. Empty field name [""] implies the entire +// type violates the rule. +func (n *NamesMatch) Validate(t *types.Type) ([]string, error) { + fields := make([]string, 0) + + // Only validate struct type and ignore the rest + switch t.Kind { + case types.Struct: + for _, m := range t.Members { + goName := m.Name + jsonTag, ok := reflect.StructTag(m.Tags).Lookup("json") + // Distinguish empty JSON tag and missing JSON tag. Empty JSON tag / name is + // allowed (in JSON name blacklist) but missing JSON tag is invalid. + if !ok { + fields = append(fields, goName) + continue + } + if jsonTagBlacklist.Has(jsonTag) { + continue + } + jsonName := strings.Split(jsonTag, ",")[0] + if !namesMatch(goName, jsonName) { + fields = append(fields, goName) + } + } + } + return fields, nil +} + +// namesMatch evaluates if goName and jsonName match the API rule +// TODO: Use an off-the-shelf CamelCase solution instead of implementing this logic. The following existing +// packages have been tried out: +// github.com/markbates/inflect +// github.com/segmentio/go-camelcase +// github.com/iancoleman/strcase +// github.com/fatih/camelcase +// Please see https://github.com/kubernetes/kube-openapi/pull/83#issuecomment-400842314 for more details +// about why they don't satisfy our need. What we need can be a function that detects an acronym at the +// beginning of a string. +func namesMatch(goName, jsonName string) bool { + if jsonNameBlacklist.Has(jsonName) { + return true + } + if !isAllowedName(goName) || !isAllowedName(jsonName) { + return false + } + if strings.ToLower(goName) != strings.ToLower(jsonName) { + return false + } + // Go field names must be CamelCase. JSON field names must be camelCase. + if !isCapital(goName[0]) || isCapital(jsonName[0]) { + return false + } + for i := 0; i < len(goName); i++ { + if goName[i] == jsonName[i] { + // goName[0:i-1] is uppercase and jsonName[0:i-1] is lowercase, goName[i:] + // and jsonName[i:] should match; + // goName[i] should be lowercase if i is equal to 1, e.g.: + // goName | jsonName + // PodSpec podSpec + // or uppercase if i is greater than 1, e.g.: + // goname | jsonName + // JSONSpec jsonSpec + // This is to rule out cases like: + // goname | jsonName + // JSONSpec jsonspec + return goName[i:] == jsonName[i:] && (i == 1 || isCapital(goName[i])) + } + } + return true +} + +// isCaptical returns true if one character is capital +func isCapital(b byte) bool { + return b >= 'A' && b <= 'Z' +} + +// isAllowedName checks the list of disallowedNameSubstrings and returns true if name doesn't contain +// any disallowed substring. 
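+// For example, isAllowedName("pod_spec") and isAllowedName("pod-spec") return
+// false, while isAllowedName("podSpec") returns true.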
+func isAllowedName(name string) bool { + for _, substr := range disallowedNameSubstrings.UnsortedList() { + if strings.Contains(name, substr) { + return false + } + } + return true +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go new file mode 100644 index 00000000..dd37ad8a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "reflect" + "strings" + + "k8s.io/gengo/types" +) + +// OmitEmptyMatchCase implements APIRule interface. +// "omitempty" must appear verbatim (no case variants). +type OmitEmptyMatchCase struct{} + +func (n *OmitEmptyMatchCase) Name() string { + return "omitempty_match_case" +} + +func (n *OmitEmptyMatchCase) Validate(t *types.Type) ([]string, error) { + fields := make([]string, 0) + + // Only validate struct type and ignore the rest + switch t.Kind { + case types.Struct: + for _, m := range t.Members { + goName := m.Name + jsonTag, ok := reflect.StructTag(m.Tags).Lookup("json") + if !ok { + continue + } + + parts := strings.Split(jsonTag, ",") + if len(parts) < 2 { + // no tags other than name + continue + } + if parts[0] == "-" { + // not serialized + continue + } + for _, part := range parts[1:] { + if strings.EqualFold(part, "omitempty") && part != "omitempty" { + fields = append(fields, goName) + } + } + } + } + return fields, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/union.go b/vendor/k8s.io/kube-openapi/pkg/generators/union.go new file mode 100644 index 00000000..a0281fe4 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/union.go @@ -0,0 +1,207 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import ( + "fmt" + "sort" + + "k8s.io/gengo/types" +) + +const tagUnionMember = "union" +const tagUnionDeprecated = "unionDeprecated" +const tagUnionDiscriminator = "unionDiscriminator" + +type union struct { + discriminator string + fieldsToDiscriminated map[string]string +} + +// emit prints the union, can be called on a nil union (emits nothing) +func (u *union) emit(g openAPITypeWriter) { + if u == nil { + return + } + g.Do("map[string]interface{}{\n", nil) + if u.discriminator != "" { + g.Do("\"discriminator\": \"$.$\",\n", u.discriminator) + } + g.Do("\"fields-to-discriminateBy\": map[string]interface{}{\n", nil) + keys := []string{} + for field := range u.fieldsToDiscriminated { + keys = append(keys, field) + } + sort.Strings(keys) + for _, field := range keys { + g.Do("\"$.$\": ", field) + g.Do("\"$.$\",\n", u.fieldsToDiscriminated[field]) + } + g.Do("},\n", nil) + g.Do("},\n", nil) +} + +// Sets the discriminator if it's not set yet, otherwise return an error +func (u *union) setDiscriminator(value string) []error { + errors := []error{} + if u.discriminator != "" { + errors = append(errors, fmt.Errorf("at least two discriminators found: %v and %v", value, u.discriminator)) + } + u.discriminator = value + return errors +} + +// Add a new member to the union +func (u *union) addMember(jsonName, variableName string) { + if _, ok := u.fieldsToDiscriminated[jsonName]; ok { + panic(fmt.Errorf("same field (%v) found multiple times", jsonName)) + } + u.fieldsToDiscriminated[jsonName] = variableName +} + +// Makes sure that the union is valid, specifically looking for re-used discriminated +func (u *union) isValid() []error { + errors := []error{} + // Case 1: discriminator but no fields + if u.discriminator != "" && len(u.fieldsToDiscriminated) == 0 { + errors = append(errors, fmt.Errorf("discriminator set with no fields in union")) + } + // Case 2: two fields have the same discriminated value + discriminated := map[string]struct{}{} + for _, d := range u.fieldsToDiscriminated { + if _, ok := discriminated[d]; ok { + errors = append(errors, fmt.Errorf("discriminated value is used twice: %v", d)) + } + discriminated[d] = struct{}{} + } + // Case 3: a field is both discriminator AND part of the union + if u.discriminator != "" { + if _, ok := u.fieldsToDiscriminated[u.discriminator]; ok { + errors = append(errors, fmt.Errorf("%v can't be both discriminator and part of the union", u.discriminator)) + } + } + return errors +} + +// Find unions either directly on the members (or inlined members, not +// going across types) or on the type itself, or on embedded types. +func parseUnions(t *types.Type) ([]union, []error) { + errors := []error{} + unions := []union{} + su, err := parseUnionStruct(t) + if su != nil { + unions = append(unions, *su) + } + errors = append(errors, err...) + eu, err := parseEmbeddedUnion(t) + unions = append(unions, eu...) + errors = append(errors, err...) + mu, err := parseUnionMembers(t) + if mu != nil { + unions = append(unions, *mu) + } + errors = append(errors, err...) + return unions, errors +} + +// Find unions in embedded types, unions shouldn't go across types. +func parseEmbeddedUnion(t *types.Type) ([]union, []error) { + errors := []error{} + unions := []union{} + for _, m := range t.Members { + if hasOpenAPITagValue(m.CommentLines, tagValueFalse) { + continue + } + if !shouldInlineMembers(&m) { + continue + } + u, err := parseUnions(m.Type) + unions = append(unions, u...) + errors = append(errors, err...) 
+ } + return unions, errors +} + +// Look for union tag on a struct, and then include all the fields +// (except the discriminator if there is one). The struct shouldn't have +// embedded types. +func parseUnionStruct(t *types.Type) (*union, []error) { + errors := []error{} + if types.ExtractCommentTags("+", t.CommentLines)[tagUnionMember] == nil { + return nil, nil + } + + u := &union{fieldsToDiscriminated: map[string]string{}} + + for _, m := range t.Members { + jsonName := getReferableName(&m) + if jsonName == "" { + continue + } + if shouldInlineMembers(&m) { + errors = append(errors, fmt.Errorf("union structures can't have embedded fields: %v.%v", t.Name, m.Name)) + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { + errors = append(errors, fmt.Errorf("union struct can't have unionDeprecated members: %v.%v", t.Name, m.Name)) + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { + errors = append(errors, u.setDiscriminator(jsonName)...) + } else { + if !hasOptionalTag(&m) { + errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name)) + } + u.addMember(jsonName, m.Name) + } + } + + return u, errors +} + +// Find unions specifically on members. +func parseUnionMembers(t *types.Type) (*union, []error) { + errors := []error{} + u := &union{fieldsToDiscriminated: map[string]string{}} + + for _, m := range t.Members { + jsonName := getReferableName(&m) + if jsonName == "" { + continue + } + if shouldInlineMembers(&m) { + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { + errors = append(errors, u.setDiscriminator(jsonName)...) + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionMember] != nil { + errors = append(errors, fmt.Errorf("union tag is not accepted on struct members: %v.%v", t.Name, m.Name)) + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { + if !hasOptionalTag(&m) { + errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name)) + } + u.addMember(jsonName, m.Name) + } + } + if len(u.fieldsToDiscriminated) == 0 { + return nil, nil + } + return u, append(errors, u.isValid()...) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go b/vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go new file mode 100644 index 00000000..13303ea8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// NOTE: This file is copied from k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go +// because in Kubernetes we don't allowed vendor code to import staging code. See +// https://github.com/kubernetes/kube-openapi/pull/90 for more details. 
+ +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/sets/string.go b/vendor/k8s.io/kube-openapi/pkg/util/sets/string.go new file mode 100644 index 00000000..53f2bc12 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/sets/string.go @@ -0,0 +1,207 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// NOTE: This file is copied from k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +// because in Kubernetes we don't allowed vendor code to import staging code. See +// https://github.com/kubernetes/kube-openapi/pull/90 for more details. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 00000000..0d639275 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go new file mode 100644 index 00000000..5365a113 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -0,0 +1,86 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pointer + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64Ptr returns a pointer to an int64 +func Int64Ptr(i int64) *int64 { + return &i +} + +// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} + +// BoolPtr returns a pointer to a bool +func BoolPtr(b bool) *bool { + return &b +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64Ptr returns a pointer to the passed float64. +func Float64Ptr(i float64) *float64 { + return &i +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index f672d88f..3b424104 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -25,32 +25,55 @@ import ( "k8s.io/klog" ) +// Field is a key value pair that provides additional details about the trace. +type Field struct { + Key string + Value interface{} +} + +func (f Field) format() string { + return fmt.Sprintf("%s:%v", f.Key, f.Value) +} + +func writeFields(b *bytes.Buffer, l []Field) { + for i, f := range l { + b.WriteString(f.format()) + if i < len(l)-1 { + b.WriteString(",") + } + } +} + type traceStep struct { stepTime time.Time msg string + fields []Field } // Trace keeps track of a set of "steps" and allows us to log a specific // step if it took longer than its share of the total allowed time type Trace struct { name string + fields []Field startTime time.Time steps []traceStep } -// New creates a Trace with the specified name -func New(name string) *Trace { - return &Trace{name, time.Now(), nil} +// New creates a Trace with the specified name. The name identifies the operation to be traced. The +// Fields add key value pairs to provide additional details about the trace, such as operation inputs. +func New(name string, fields ...Field) *Trace { + return &Trace{name: name, startTime: time.Now(), fields: fields} } -// Step adds a new step with a specific message. Call this at the end of an -// execution step to record how long it took. -func (t *Trace) Step(msg string) { +// Step adds a new step with a specific message. Call this at the end of an execution step to record +// how long it took. The Fields add key value pairs to provide additional details about the trace +// step. 
+func (t *Trace) Step(msg string, fields ...Field) { if t.steps == nil { // traces almost always have less than 6 steps, do this to avoid more than a single allocation t.steps = make([]traceStep, 0, 6) } - t.steps = append(t.steps, traceStep{time.Now(), msg}) + t.steps = append(t.steps, traceStep{stepTime: time.Now(), msg: msg, fields: fields}) } // Log is used to dump all the steps in the Trace @@ -65,12 +88,23 @@ func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { endTime := time.Now() totalTime := endTime.Sub(t.startTime) - buffer.WriteString(fmt.Sprintf("Trace[%d]: %q (started: %v) (total time: %v):\n", tracenum, t.name, t.startTime, totalTime)) + buffer.WriteString(fmt.Sprintf("Trace[%d]: %q ", tracenum, t.name)) + if len(t.fields) > 0 { + writeFields(&buffer, t.fields) + buffer.WriteString(" ") + } + buffer.WriteString(fmt.Sprintf("(started: %v) (total time: %v):\n", t.startTime, totalTime)) lastStepTime := t.startTime for _, step := range t.steps { stepDuration := step.stepTime.Sub(lastStepTime) if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { - buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) + buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] ", tracenum, step.stepTime.Sub(t.startTime), stepDuration)) + buffer.WriteString(step.msg) + if len(step.fields) > 0 { + buffer.WriteString(" ") + writeFields(&buffer, step.fields) + } + buffer.WriteString("\n") } lastStepTime = step.stepTime } diff --git a/vendor/modules.txt b/vendor/modules.txt index 6e913e28..74e241b9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,8 +1,23 @@ +# github.com/PuerkitoBio/purell v1.1.1 +github.com/PuerkitoBio/purell +# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 +github.com/PuerkitoBio/urlesc # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/evanphx/json-patch v4.2.0+incompatible +# github.com/emicklei/go-restful v2.9.5+incompatible +github.com/emicklei/go-restful +github.com/emicklei/go-restful/log +# github.com/evanphx/json-patch v4.9.0+incompatible github.com/evanphx/json-patch -# github.com/gogo/protobuf v1.2.1 +# github.com/go-openapi/jsonpointer v0.19.3 +github.com/go-openapi/jsonpointer +# github.com/go-openapi/jsonreference v0.19.2 +github.com/go-openapi/jsonreference +# github.com/go-openapi/spec v0.19.2 +github.com/go-openapi/spec +# github.com/go-openapi/swag v0.19.5 +github.com/go-openapi/swag +# github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys # github.com/golang/protobuf v1.3.1 @@ -11,6 +26,12 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +# github.com/google/go-cmp v0.3.0 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz # github.com/google/uuid v1.1.2 @@ -24,21 +45,23 @@ github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/imdario/mergo v0.3.7 github.com/imdario/mergo -# github.com/json-iterator/go v1.1.6 +# github.com/json-iterator/go v1.1.7 github.com/json-iterator/go +# github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e +github.com/mailru/easyjson/jlexer 
+github.com/mailru/easyjson/jwriter +github.com/mailru/easyjson/buffer # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent -# github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 +# github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 -# github.com/pborman/uuid v1.2.1 -github.com/pborman/uuid # github.com/pkg/errors v0.8.1 github.com/pkg/errors -# github.com/spf13/pflag v1.0.3 +# github.com/spf13/pflag v1.0.5 github.com/spf13/pflag -# golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5 +# golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20190603091049-60506f45cf65 +# golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 golang.org/x/net/http2 golang.org/x/net/http/httpguts golang.org/x/net/http2/hpack @@ -48,7 +71,7 @@ golang.org/x/net/context # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed +# golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.2 @@ -56,8 +79,46 @@ golang.org/x/text/secure/bidirule golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/transform +golang.org/x/text/width # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time/rate +# golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 +golang.org/x/tools/imports +golang.org/x/tools/internal/imports +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/packages +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/module +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/internal/semver +golang.org/x/tools/internal/fastwalk +golang.org/x/tools/go/internal/gcimporter +# gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 +gonum.org/v1/gonum/graph +gonum.org/v1/gonum/graph/simple +gonum.org/v1/gonum/graph/topo +gonum.org/v1/gonum/graph/internal/ordered +gonum.org/v1/gonum/graph/internal/uid +gonum.org/v1/gonum/graph/iterator +gonum.org/v1/gonum/mat +gonum.org/v1/gonum/graph/internal/linear +gonum.org/v1/gonum/graph/internal/set +gonum.org/v1/gonum/graph/traverse +gonum.org/v1/gonum/blas +gonum.org/v1/gonum/blas/blas64 +gonum.org/v1/gonum/blas/cblas128 +gonum.org/v1/gonum/floats +gonum.org/v1/gonum/internal/asm/f64 +gonum.org/v1/gonum/lapack +gonum.org/v1/gonum/lapack/lapack64 +gonum.org/v1/gonum/blas/gonum +gonum.org/v1/gonum/lapack/gonum +gonum.org/v1/gonum/internal/asm/c128 +gonum.org/v1/gonum/internal/asm/c64 +gonum.org/v1/gonum/internal/asm/f32 +gonum.org/v1/gonum/internal/cmplx64 +gonum.org/v1/gonum/internal/math32 # google.golang.org/appengine v1.6.0 google.golang.org/appengine/urlfetch google.golang.org/appengine/internal @@ -68,10 +129,11 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 -# gopkg.in/yaml.v2 v2.2.2 +# gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0-20190516230258-a675ac48af67 +# k8s.io/api v0.16.15 k8s.io/api/core/v1 +k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 @@ -86,6 +148,7 @@ k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 +k8s.io/api/discovery/v1alpha1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 
k8s.io/api/networking/v1 @@ -107,13 +170,15 @@ k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0-20190516231611-bf6753f2aa24 +# k8s.io/apiextensions-apiserver v0.16.15 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme -# k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d +# k8s.io/apimachinery v0.16.15 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/runtime @@ -153,11 +218,12 @@ k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/apis/meta/v1/unstructured -k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/pkg/apis/meta/internalversion -# k8s.io/client-go v0.0.0-20190516230509-ae8359b20417 +k8s.io/apimachinery/pkg/apis/meta/v1beta1 +k8s.io/apimachinery/pkg/api/equality +# k8s.io/client-go v0.16.15 k8s.io/client-go/rest k8s.io/client-go/tools/clientcmd k8s.io/client-go/kubernetes @@ -180,6 +246,7 @@ k8s.io/client-go/util/cert k8s.io/client-go/tools/auth k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/util/homedir +k8s.io/client-go/kubernetes/typed/admissionregistration/v1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -199,6 +266,7 @@ k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/core/v1 +k8s.io/client-go/kubernetes/typed/discovery/v1alpha1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1 @@ -226,6 +294,7 @@ k8s.io/client-go/informers/batch k8s.io/client-go/informers/certificates k8s.io/client-go/informers/coordination k8s.io/client-go/informers/core +k8s.io/client-go/informers/discovery k8s.io/client-go/informers/events k8s.io/client-go/informers/extensions k8s.io/client-go/informers/internalinterfaces @@ -243,6 +312,7 @@ k8s.io/client-go/util/connrotation k8s.io/client-go/util/keyutil k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/reference +k8s.io/client-go/informers/admissionregistration/v1 k8s.io/client-go/informers/admissionregistration/v1beta1 k8s.io/client-go/informers/apps/v1 k8s.io/client-go/informers/apps/v1beta1 @@ -258,6 +328,7 @@ k8s.io/client-go/informers/certificates/v1beta1 k8s.io/client-go/informers/coordination/v1 k8s.io/client-go/informers/coordination/v1beta1 k8s.io/client-go/informers/core/v1 +k8s.io/client-go/informers/discovery/v1alpha1 k8s.io/client-go/informers/events/v1beta1 k8s.io/client-go/informers/extensions/v1beta1 k8s.io/client-go/informers/networking/v1 @@ -275,6 +346,7 @@ k8s.io/client-go/informers/settings/v1alpha1 k8s.io/client-go/informers/storage/v1 k8s.io/client-go/informers/storage/v1alpha1 
k8s.io/client-go/informers/storage/v1beta1 +k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1beta1 k8s.io/client-go/listers/apps/v1 k8s.io/client-go/listers/apps/v1beta1 @@ -289,6 +361,7 @@ k8s.io/client-go/listers/batch/v2alpha1 k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 k8s.io/client-go/listers/coordination/v1beta1 +k8s.io/client-go/listers/discovery/v1alpha1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 k8s.io/client-go/listers/networking/v1 @@ -306,13 +379,64 @@ k8s.io/client-go/listers/settings/v1alpha1 k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 -# k8s.io/klog v0.3.2 +# k8s.io/code-generator v0.16.15 +k8s.io/code-generator +k8s.io/code-generator/cmd/client-gen +k8s.io/code-generator/cmd/conversion-gen +k8s.io/code-generator/cmd/deepcopy-gen +k8s.io/code-generator/cmd/defaulter-gen +k8s.io/code-generator/cmd/go-to-protobuf +k8s.io/code-generator/cmd/import-boss +k8s.io/code-generator/cmd/informer-gen +k8s.io/code-generator/cmd/lister-gen +k8s.io/code-generator/cmd/openapi-gen +k8s.io/code-generator/cmd/register-gen +k8s.io/code-generator/cmd/set-gen +k8s.io/code-generator/cmd/client-gen/args +k8s.io/code-generator/cmd/client-gen/generators +k8s.io/code-generator/pkg/util +k8s.io/code-generator/cmd/conversion-gen/args +k8s.io/code-generator/cmd/conversion-gen/generators +k8s.io/code-generator/cmd/deepcopy-gen/args +k8s.io/code-generator/cmd/defaulter-gen/args +k8s.io/code-generator/cmd/go-to-protobuf/protobuf +k8s.io/code-generator/cmd/informer-gen/args +k8s.io/code-generator/cmd/informer-gen/generators +k8s.io/code-generator/cmd/lister-gen/args +k8s.io/code-generator/cmd/lister-gen/generators +k8s.io/code-generator/cmd/register-gen/args +k8s.io/code-generator/cmd/register-gen/generators +k8s.io/code-generator/cmd/client-gen/types +k8s.io/code-generator/cmd/client-gen/generators/fake +k8s.io/code-generator/cmd/client-gen/generators/scheme +k8s.io/code-generator/cmd/client-gen/generators/util +k8s.io/code-generator/cmd/client-gen/path +k8s.io/code-generator/pkg/namer +k8s.io/code-generator/third_party/forked/golang/reflect +# k8s.io/gengo v0.0.0-20190822140433-26a664648505 +k8s.io/gengo/args +k8s.io/gengo/examples/deepcopy-gen/generators +k8s.io/gengo/examples/defaulter-gen/generators +k8s.io/gengo/examples/import-boss/generators +k8s.io/gengo/examples/set-gen/generators +k8s.io/gengo/generator +k8s.io/gengo/namer +k8s.io/gengo/types +k8s.io/gengo/parser +k8s.io/gengo/examples/set-gen/sets +# k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 +# k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31 +k8s.io/kube-openapi/cmd/openapi-gen/args +k8s.io/kube-openapi/pkg/generators k8s.io/kube-openapi/pkg/util/proto -# k8s.io/utils v0.0.0-20190529001817-6999998975a7 +k8s.io/kube-openapi/pkg/common +k8s.io/kube-openapi/pkg/generators/rules +k8s.io/kube-openapi/pkg/util/sets +# k8s.io/utils v0.0.0-20190801114015-581e00157fb1 +k8s.io/utils/pointer k8s.io/utils/integer k8s.io/utils/buffer k8s.io/utils/trace -# sigs.k8s.io/yaml v1.1.0 +# sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml index 03ddc731..d20e23ef 100644 --- a/vendor/sigs.k8s.io/yaml/.travis.yml +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -1,14 +1,13 @@ language: go dist: xenial go: - - 1.9.x - - 1.10.x - - 1.11.x + 
- 1.12.x + - 1.13.x script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(gofmt -d *.go) - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) - - go tool vet . - - go test -v -race ./... + - GO111MODULE=on go vet . + - GO111MODULE=on go test -v -race ./... + - git diff --exit-code install: - - go get golang.org/x/lint/golint + - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 11ad7ce1..325b40b0 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - dims - lavalamp diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md index 0200f75b..5a651d91 100644 --- a/vendor/sigs.k8s.io/yaml/README.md +++ b/vendor/sigs.k8s.io/yaml/README.md @@ -1,12 +1,14 @@ # YAML marshaling and unmarshaling support for Go -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) +[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml) + +kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml). ## Introduction A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). 
## Compatibility @@ -32,13 +34,13 @@ GOOD: To install, run: ``` -$ go get github.com/ghodss/yaml +$ go get sigs.k8s.io/yaml ``` And import using: ``` -import "github.com/ghodss/yaml" +import "sigs.k8s.io/yaml" ``` Usage is very similar to the JSON library: @@ -49,7 +51,7 @@ package main import ( "fmt" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) type Person struct { @@ -93,7 +95,7 @@ package main import ( "fmt" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) func main() { diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod new file mode 100644 index 00000000..7224f349 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/go.mod @@ -0,0 +1,8 @@ +module sigs.k8s.io/yaml + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + gopkg.in/yaml.v2 v2.2.8 +) diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum new file mode 100644 index 00000000..76e49483 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go index 02459611..efbc535d 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -317,3 +317,64 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in return yamlObj, nil } } + +// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice, +// without going through a byte representation. A nil or empty map[string]interface{} input is +// converted to an empty map, i.e. yaml.MapSlice(nil). +// +// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice. +// +// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml: +// - float64s are down-casted as far as possible without data-loss to int, int64, uint64. +// - int64s are down-casted to int if possible without data-loss. +// +// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case. +// +// string, bool and any other types are unchanged. 
+func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice { + if len(j) == 0 { + return nil + } + ret := make(yaml.MapSlice, 0, len(j)) + for k, v := range j { + ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)}) + } + return ret +} + +func jsonToYAMLValue(j interface{}) interface{} { + switch j := j.(type) { + case map[string]interface{}: + if j == nil { + return interface{}(nil) + } + return JSONObjectToYAMLObject(j) + case []interface{}: + if j == nil { + return interface{}(nil) + } + ret := make([]interface{}, len(j)) + for i := range j { + ret[i] = jsonToYAMLValue(j[i]) + } + return ret + case float64: + // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 + if i64 := int64(j); j == float64(i64) { + if i := int(i64); i64 == int64(i) { + return i + } + return i64 + } + if ui64 := uint64(j); j == float64(ui64) { + return ui64 + } + return j + case int64: + if i := int(j); j == int64(i) { + return i + } + return j + } + return j +} From 6aa6e5d0638a5f2b09c6cfe5b6bbe77734098b9b Mon Sep 17 00:00:00 2001 From: Yuqi Wang Date: Fri, 14 Jan 2022 21:08:05 +0800 Subject: [PATCH 3/7] Code --- pkg/apis/frameworkcontroller/v1/crd.go | 66 +++++++++++++++++++++----- pkg/internal/utils.go | 12 ++--- 2 files changed, 61 insertions(+), 17 deletions(-) diff --git a/pkg/apis/frameworkcontroller/v1/crd.go b/pkg/apis/frameworkcontroller/v1/crd.go index 0b2d3dee..3a94d306 100644 --- a/pkg/apis/frameworkcontroller/v1/crd.go +++ b/pkg/apis/frameworkcontroller/v1/crd.go @@ -24,7 +24,7 @@ package v1 import ( "github.com/microsoft/frameworkcontroller/pkg/common" - apiExtensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiExtensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -39,30 +39,33 @@ func BuildFrameworkCRD() *apiExtensions.CustomResourceDefinition { Name: FrameworkCRDName, }, Spec: apiExtensions.CustomResourceDefinitionSpec{ - Group: GroupName, - Version: SchemeGroupVersion.Version, - Scope: apiExtensions.NamespaceScoped, + Group: GroupName, + Versions: []apiExtensions.CustomResourceDefinitionVersion{ + { + Name: SchemeGroupVersion.Version, + Served: true, + Storage: true, + Schema: buildFrameworkSchema(), + }, + }, + Scope: apiExtensions.NamespaceScoped, Names: apiExtensions.CustomResourceDefinitionNames{ Plural: FrameworkPlural, Kind: FrameworkKind, }, - Validation: buildFrameworkValidation(), - // TODO: Enable CRD Subresources after ApiServer has supported it. 
- //Subresources: &apiExtensions.CustomResourceSubresources{ - // Status: &apiExtensions.CustomResourceSubresourceStatus{ - // }, - //}, }, } return crd } -func buildFrameworkValidation() *apiExtensions.CustomResourceValidation { +func buildFrameworkSchema() *apiExtensions.CustomResourceValidation { return &apiExtensions.CustomResourceValidation{ OpenAPIV3Schema: &apiExtensions.JSONSchemaProps{ + Type: "object", Properties: map[string]apiExtensions.JSONSchemaProps{ "metadata": { + Type: "object", Properties: map[string]apiExtensions.JSONSchemaProps{ "name": { Type: "string", @@ -71,8 +74,14 @@ func buildFrameworkValidation() *apiExtensions.CustomResourceValidation { }, }, "spec": { + Type: "object", Properties: map[string]apiExtensions.JSONSchemaProps{ + "description": { + Type: "string", + Nullable: true, + }, "executionType": { + Type: "string", Enum: []apiExtensions.JSON{ {Raw: []byte(common.Quote(string(ExecutionCreate)))}, {Raw: []byte(common.Quote(string(ExecutionStart)))}, @@ -80,11 +89,15 @@ func buildFrameworkValidation() *apiExtensions.CustomResourceValidation { }, }, "retryPolicy": { + Type: "object", Properties: map[string]apiExtensions.JSONSchemaProps{ "maxRetryCount": { Type: "integer", Minimum: common.PtrFloat64(ExtendedUnlimitedValue), }, + "fancyRetryPolicy": { + Type: "boolean", + }, }, }, "taskRoles": { @@ -92,6 +105,7 @@ func buildFrameworkValidation() *apiExtensions.CustomResourceValidation { Type: "array", Items: &apiExtensions.JSONSchemaPropsOrArray{ Schema: &apiExtensions.JSONSchemaProps{ + Type: "object", Properties: map[string]apiExtensions.JSONSchemaProps{ "name": { Type: "string", @@ -103,6 +117,7 @@ func buildFrameworkValidation() *apiExtensions.CustomResourceValidation { Maximum: common.PtrFloat64(10000), }, "frameworkAttemptCompletionPolicy": { + Type: "object", Properties: map[string]apiExtensions.JSONSchemaProps{ "minFailedTaskCount": { Type: "integer", @@ -116,12 +131,41 @@ func buildFrameworkValidation() *apiExtensions.CustomResourceValidation { }, }, }, + "task": { + Type: "object", + Properties: map[string]apiExtensions.JSONSchemaProps{ + "retryPolicy": { + Type: "object", + Properties: map[string]apiExtensions.JSONSchemaProps{ + "maxRetryCount": { + Type: "integer", + Minimum: common.PtrFloat64(ExtendedUnlimitedValue), + }, + "fancyRetryPolicy": { + Type: "boolean", + }, + }, + }, + "podGracefulDeletionTimeoutSec": { + Type: "integer", + }, + "pod": { + Type: "object", + XPreserveUnknownFields: common.PtrBool(true), + }, + }, + }, }, }, }, }, }, }, + "status": { + Type: "object", + Nullable: true, + XPreserveUnknownFields: common.PtrBool(true), + }, }, }, } diff --git a/pkg/internal/utils.go b/pkg/internal/utils.go index a3c05ea6..01d3e85e 100644 --- a/pkg/internal/utils.go +++ b/pkg/internal/utils.go @@ -28,7 +28,7 @@ import ( frameworkClient "github.com/microsoft/frameworkcontroller/pkg/client/clientset/versioned" "github.com/microsoft/frameworkcontroller/pkg/common" core "k8s.io/api/core/v1" - apiExtensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiExtensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiErrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -72,7 +72,7 @@ func PutCRD( func DeleteCRD(config *rest.Config, name string) { client := createCRDClient(config) - err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(name, meta.NewDeleteOptions(0)) + err := 
client.ApiextensionsV1().CustomResourceDefinitions().Delete(name, meta.NewDeleteOptions(0)) if err != nil && !apiErrors.IsNotFound(err) { panic(fmt.Errorf("Failed to delete CRD: %v", err)) } else { @@ -93,20 +93,20 @@ func putCRDInternal( client apiClient.Interface, newCRD *apiExtensions.CustomResourceDefinition, establishedCheckIntervalSec *int64, establishedCheckTimeoutSec *int64) error { - remoteCRD, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(newCRD.Name, meta.GetOptions{}) + remoteCRD, err := client.ApiextensionsV1().CustomResourceDefinitions().Get(newCRD.Name, meta.GetOptions{}) if err == nil { klog.Infof("Update CRD %v", newCRD.Name) if !reflect.DeepEqual(remoteCRD.Spec, newCRD.Spec) { updateCRD := remoteCRD updateCRD.Spec = newCRD.Spec - remoteCRD, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(updateCRD) + remoteCRD, err = client.ApiextensionsV1().CustomResourceDefinitions().Update(updateCRD) if err != nil { return err } } } else if apiErrors.IsNotFound(err) { klog.Infof("Create CRD %v", newCRD.Name) - remoteCRD, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(newCRD) + remoteCRD, err = client.ApiextensionsV1().CustomResourceDefinitions().Create(newCRD) if err != nil { return err } @@ -121,7 +121,7 @@ func putCRDInternal( common.SecToDuration(establishedCheckIntervalSec), common.SecToDuration(establishedCheckTimeoutSec), func() (bool, error) { - remoteCRD, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(newCRD.Name, meta.GetOptions{}) + remoteCRD, err = client.ApiextensionsV1().CustomResourceDefinitions().Get(newCRD.Name, meta.GetOptions{}) if err != nil { return false, err } From 1d015cb15455756556ab1e1ea67bfd9d254a2b29 Mon Sep 17 00:00:00 2001 From: Yuqi Wang Date: Fri, 14 Jan 2022 13:10:13 +0000 Subject: [PATCH 4/7] Update dep --- vendor/modules.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vendor/modules.txt b/vendor/modules.txt index 74e241b9..29b0bdb5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -171,13 +171,13 @@ k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 # k8s.io/apiextensions-apiserver v0.16.15 -k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 # k8s.io/apimachinery v0.16.15 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/apis/meta/v1 @@ -194,6 +194,7 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/validation/field @@ -222,7 +223,6 @@ k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/pkg/api/equality # k8s.io/client-go v0.16.15 k8s.io/client-go/rest 
k8s.io/client-go/tools/clientcmd From 8dbbab6e25d576d8c777468e5f639daaefe57be8 Mon Sep 17 00:00:00 2001 From: Yuqi Wang Date: Fri, 14 Jan 2022 13:31:01 +0000 Subject: [PATCH 5/7] Update codegen --- hack/update-codegen.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 7f2ca8ee..5bcc63e4 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -31,8 +31,7 @@ PROJECT_DIR=${BASH_DIR}/.. cd ${PROJECT_DIR} -chmod a+x ./vendor/k8s.io/code-generator/generate-groups.sh -./vendor/k8s.io/code-generator/generate-groups.sh \ +bash ./vendor/k8s.io/code-generator/generate-groups.sh \ "all" \ github.com/microsoft/frameworkcontroller/pkg/client \ github.com/microsoft/frameworkcontroller/pkg/apis \ From 7315aecafaf90eb615b881f9c75930625f7ed82a Mon Sep 17 00:00:00 2001 From: Yuqi Wang Date: Fri, 14 Jan 2022 21:43:12 +0800 Subject: [PATCH 6/7] Doc --- doc/user-manual.md | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/doc/user-manual.md b/doc/user-manual.md index 750944f1..c115b761 100644 --- a/doc/user-manual.md +++ b/doc/user-manual.md @@ -40,8 +40,8 @@ As Framework is actually a [Kubernetes CRD](https://kubernetes.io/docs/concepts/ | API Kind | Operations | |:---- |:---- | | Framework | [CREATE](#CREATE_Framework) [DELETE](#DELETE_Framework) [GET](#GET_Framework) [LIST](#LIST_Frameworks) [WATCH](#WATCH_Framework) [WATCH_LIST](#WATCH_LIST_Frameworks)
[PATCH](#PATCH_Framework) ([Start](#Start_Framework), [Stop](#Stop_Framework), [Add TaskRole](#Add_TaskRole), [Delete TaskRole](#Delete_TaskRole), [Add/Delete Task](#Add_Delete_Task)) | -| [ConfigMap](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#configmap-v1-core) | All operations except for [CREATE](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#create-configmap-v1-core) [PUT](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#replace-configmap-v1-core) [PATCH](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#patch-configmap-v1-core) | -| [Pod](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#pod-v1-core) | All operations except for [CREATE](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#create-pod-v1-core) [PUT](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#replace-pod-v1-core) [PATCH](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#patch-pod-v1-core) | +| [ConfigMap](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmap-v1-core) | All operations except for [CREATE](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#create-configmap-v1-core) [PUT](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#replace-configmap-v1-core) [PATCH](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#patch-configmap-v1-core) | +| [Pod](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#pod-v1-core) | All operations except for [CREATE](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#create-pod-v1-core) [PUT](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#replace-pod-v1-core) [PATCH](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#patch-pod-v1-core) | #### CREATE Framework **Request** @@ -65,7 +65,7 @@ Any [ExecutionType](#FrameworkExecutionType) can be specified to create the Fram | OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. | | Created(201) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. | | Accepted(202) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. | -| Conflict(409) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework already exists. | +| Conflict(409) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework already exists. | #### PATCH Framework ##### Start Framework @@ -98,7 +98,7 @@ Before the Start, the Framework will not start to run or complete, but the objec | Code | Body | Description | |:---- |:---- |:---- | | OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. | -| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. | +| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. | ##### Stop Framework **Request** @@ -130,7 +130,7 @@ After the Stop, the Framework will start to complete, but the object of the Fram | Code | Body | Description | |:---- |:---- |:---- | | OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. 
| -| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. | +| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. | ##### Add TaskRole **Request** @@ -164,7 +164,7 @@ See more in [Framework ScaleUp/ScaleDown](#FrameworkRescale). | Code | Body | Description | |:---- |:---- |:---- | | OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. | -| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. | +| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. | ##### Delete TaskRole **Request** @@ -205,8 +205,8 @@ Notes: | Code | Body | Description | |:---- |:---- |:---- | | OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. | -| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. | -| UnprocessableEntity(422) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified $(TaskRoleName) does not exist or does not match the specified $(TaskRoleIndex). | +| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. | +| UnprocessableEntity(422) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified $(TaskRoleName) does not exist or does not match the specified $(TaskRoleIndex). | ##### Add/Delete Task **Request** @@ -272,8 +272,8 @@ See more in [Framework ScaleUp/ScaleDown](#FrameworkRescale). | Code | Body | Description | |:---- |:---- |:---- | | OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. | -| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. | -| UnprocessableEntity(422) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified $(TaskRoleName) does not exist or does not match the specified $(TaskRoleIndex). | +| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. | +| UnprocessableEntity(422) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified $(TaskRoleName) does not exist or does not match the specified $(TaskRoleIndex). | #### DELETE Framework **Request** @@ -301,15 +301,15 @@ Delete the specified Framework. Notes: * If you need to achieve all the [Framework ConsistencyGuarantees](#ConsistencyGuarantees) or achieve higher [Framework Availability](#FrameworkAvailability) by leveraging the [PodGracefulDeletionTimeoutSec](../pkg/apis/frameworkcontroller/v1/types.go), you should always use and only use the [Foreground Deletion](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#foreground-cascading-deletion) in the provided body. 
**Response**
| Code | Body | Description |
|:---- |:---- |:---- |
| OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | The specified Framework is deleting. Return current Framework. |
-| OK(200) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is deleted. |
-| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. |
+| OK(200) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is deleted. |
+| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. |
#### GET Framework
**Request**
@@ -325,7 +325,7 @@ Get the specified Framework.
| Code | Body | Description |
|:---- |:---- |:---- |
| OK(200) | [Framework](../pkg/apis/frameworkcontroller/v1/types.go) | Return current Framework. |
-| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. |
+| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. |
#### LIST Frameworks
**Request**
@@ -333,7 +333,7 @@ Get the specified Framework.
GET /apis/frameworkcontroller.microsoft.com/v1/namespaces/{FrameworkNamespace}/frameworks
GET /apis/frameworkcontroller.microsoft.com/v1/frameworks
-QueryParameters: Same as [StatefulSet QueryParameters](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#list-statefulset-v1-apps)
+QueryParameters: Same as [StatefulSet QueryParameters](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#list-statefulset-v1-apps)
**Description**
@@ -350,7 +350,7 @@ Get all Frameworks (in the specified FrameworkNamespace).
GET /apis/frameworkcontroller.microsoft.com/v1/watch/namespaces/{FrameworkNamespace}/frameworks/{FrameworkName}
-QueryParameters: Same as [StatefulSet QueryParameters](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#watch-statefulset-v1-apps)
+QueryParameters: Same as [StatefulSet QueryParameters](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#watch-statefulset-v1-apps)
**Description**
@@ -360,8 +360,8 @@ Watch the change events of the specified Framework.
| Code | Body | Description |
|:---- |:---- |:---- |
-| OK(200) | [WatchEvent](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#watchevent-v1-meta) | Streaming the change events of the specified Framework. |
-| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) | The specified Framework is not found. |
+| OK(200) | [WatchEvent](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#watchevent-v1-meta) | Streaming the change events of the specified Framework. |
+| NotFound(404) | [Status](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#status-v1-meta) | The specified Framework is not found. |
#### WATCH_LIST Frameworks
**Request**
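The WATCH and WATCH_LIST endpoints documented in the hunks around this point are what a client consumes for Framework change events. As a rough, illustrative sketch (again not part of the patched manual, and again assuming the pre-context client-go v0.16.x signatures pinned by this change, with placeholder namespace and name), the same dynamic client can stream those events:

```go
package main

import (
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	frameworkGVR := schema.GroupVersionResource{
		Group:    "frameworkcontroller.microsoft.com",
		Version:  "v1",
		Resource: "frameworks",
	}

	// Watch a single Framework by name; dropping the FieldSelector would stream
	// change events for all Frameworks in the namespace (the WATCH_LIST case).
	w, err := client.Resource(frameworkGVR).Namespace("default").Watch(metav1.ListOptions{
		FieldSelector: "metadata.name=framework-example", // placeholder name
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	for event := range w.ResultChan() {
		fmt.Printf("event: %s %v\n", event.Type, event.Object.GetObjectKind().GroupVersionKind())
	}
}
```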
@@ -369,7 +369,7 @@ Watch the change events of the specified Framework.
GET /apis/frameworkcontroller.microsoft.com/v1/watch/namespaces/{FrameworkNamespace}/frameworks
GET /apis/frameworkcontroller.microsoft.com/v1/watch/frameworks
-QueryParameters: Same as [StatefulSet QueryParameters](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#watch-list-statefulset-v1-apps)
+QueryParameters: Same as [StatefulSet QueryParameters](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#watch-list-statefulset-v1-apps)
**Description**
@@ -379,7 +379,7 @@ Watch the change events of all Frameworks (in the specified FrameworkNamespace).
| Code | Body | Description |
|:---- |:---- |:---- |
-| OK(200) | [WatchEvent](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#watchevent-v1-meta) | Streaming the change events of all Frameworks (in the specified FrameworkNamespace). |
+| OK(200) | [WatchEvent](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#watchevent-v1-meta) | Streaming the change events of all Frameworks (in the specified FrameworkNamespace). |
## Framework ExecutionType
Framework [ExecutionType](../pkg/apis/frameworkcontroller/v1/types.go) can be specified to control the execution of the Framework:
@@ -422,7 +422,7 @@ spec:
   image: ubuntu:trusty
   command: ["sh", "-c", "printenv && sleep infinity"]
 ```
-2. Use above creation response's `metadata.uid` to override below {{FrameworkUID}}, and [Create ServiceAccount](https://v1-14.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#create-serviceaccount-v1-core) with above Framework reference as below:
+2. Use above creation response's `metadata.uid` to override below {{FrameworkUID}}, and [Create ServiceAccount](https://v1-14.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#create-serviceaccount-v1-core) with above Framework reference as below:
 ```yaml
 apiVersion: v1
 kind: ServiceAccount

From 39d637ca9b32c331ceee65098a40370f07f252b8 Mon Sep 17 00:00:00 2001
From: Yuqi Wang
Date: Fri, 14 Jan 2022 21:46:23 +0800
Subject: [PATCH 7/7] Doc

---
 doc/user-manual.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/user-manual.md b/doc/user-manual.md
index c115b761..73de1b0a 100644
--- a/doc/user-manual.md
+++ b/doc/user-manual.md
@@ -422,7 +422,7 @@ spec:
   image: ubuntu:trusty
   command: ["sh", "-c", "printenv && sleep infinity"]
 ```
-2. Use above creation response's `metadata.uid` to override below {{FrameworkUID}}, and [Create ServiceAccount](https://v1-14.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#create-serviceaccount-v1-core) with above Framework reference as below:
+2. Use above creation response's `metadata.uid` to override below {{FrameworkUID}}, and [Create ServiceAccount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#create-serviceaccount-v1-core) with above Framework reference as below:
 ```yaml
 apiVersion: v1
 kind: ServiceAccount